Dataset schema (one row per sample):

    code                     string   (length 87 to 55.2k)
    code_codestyle           int64    (0 to 349)
    style_context            string   (length 135 to 49.1k)
    style_context_codestyle  int64    (0 to 349)
    label                    int64    (0 to 1)
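The field names and ranges above follow the Hugging Face dataset-viewer convention (column name, dtype, observed min and max). A minimal sketch of iterating such a dump with the `datasets` library; the repository id below is a placeholder assumption, not the dataset's real location:

```python
from datasets import load_dataset

# hypothetical repo id (assumption); substitute the dataset's actual path
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample
```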
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit."""
    # sieve: start with 2 and all odd numbers, then strike out composites
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi[n] starts at n and is multiplied by (1 - 1/p) for each prime p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
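The sieve relies on Euler's product formula phi(n) = n * prod(1 - 1/p) over the primes p dividing n. A brute-force cross-check for a small limit (a sketch added for illustration, not part of the original sample):

```python
from math import gcd

def phi_bruteforce(n: int) -> int:
    # phi(n) = count of k in [1, n] with gcd(k, n) == 1
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

# both should print 3043 = phi(2) + phi(3) + ... + phi(100)
print(sum(phi_bruteforce(n) for n in range(2, 101)))
print(solution(limit=100))
```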
code_codestyle: 101
"""simple docstring""" from collections.abc import Sequence def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_)) def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' __lowercase = 0.0 for coeff in reversed(UpperCamelCase_): __lowercase = result * x + coeff return result if __name__ == "__main__": _a = (0.0, 0.0, 5.0, 9.3, 7.0) _a = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
style_context_codestyle: 17
label: 0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
code_codestyle: 102
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowerCAmelCase ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[Any], UpperCAmelCase__ : str ): super().__init__() __lowercase = model __lowercase = 2 __lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels ) def _lowercase ( self : Optional[int] ): pass def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = LongformerModel.from_pretrained(UpperCamelCase_) __lowercase = LightningModel(UpperCamelCase_) __lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu")) lightning_model.load_state_dict(ckpt["state_dict"]) # init longformer question answering model __lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCamelCase_) print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
style_context_codestyle: 17
label: 0
from datetime import datetime as dt
import os

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # close the issue: the bot already warned and nobody replied for a week
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # warn that the issue is about to be marked stale
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
code_codestyle: 103
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, ) assert hasattr(self, "env" ) def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ): # configuration for running training on smdistributed Model Parallel __lowercase = { "enabled": True, "processes_per_host": 8, } __lowercase = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } __lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} __lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 5_0_0, }, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", ) def _lowercase ( self : Tuple, UpperCAmelCase__ : int ): TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ): # create estimator __lowercase = self.create_estimator(UpperCAmelCase__ ) # run training estimator.fit() # result dataframe __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests 
result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
style_context_codestyle: 17
label: 0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # attach the image features to the text encoding
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
code_codestyle: 104
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
style_context_codestyle: 17
label: 0
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER lowerCamelCase : str =True lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge""" lowerCamelCase : str ="""accelerate_sagemaker_execution_role""" lowerCamelCase : int ="""hf-sm""" lowerCamelCase : int ="""us-east-1""" lowerCamelCase : Tuple =1 lowerCamelCase : Any ="""accelerate-sagemaker-1""" lowerCamelCase : str ="""1.6""" lowerCamelCase : Tuple ="""4.4""" lowerCamelCase : Optional[int] ="""train.py""" lowerCamelCase : Optional[Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] lowerCamelCase : Union[str, Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> List[str]: # If no defaults are changed, `to_kwargs` returns an empty dict. a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ ) assert isinstance(converted_args["do_train"] , lowerCAmelCase__ ) assert isinstance(converted_args["epochs"] , lowerCAmelCase__ ) assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ ) assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
code_codestyle: 105
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( 
feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, 
keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) 
and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
style_context_codestyle: 17
label: 0
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __UpperCamelCase : int = logging.get_logger(__name__) __UpperCamelCase : int = TypeVar('''DatasetType''', Dataset, IterableDataset) def __SCREAMING_SNAKE_CASE ( A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(A_ ): if not isinstance(A_ , (Dataset, IterableDataset) ): if isinstance(A_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' '''is an empty dataset dictionary.''' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A_ )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A_ ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A_ ).__name__}.' ) if i == 0: lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = ( (Dataset, IterableDataset) if isinstance(A_ , A_ ) else (IterableDataset, Dataset) ) elif not isinstance(A_ , A_ ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' ) if dataset_type is Dataset: return _interleave_map_style_datasets( A_ , A_ , A_ , info=A_ , split=A_ , stopping_strategy=A_ ) else: return _interleave_iterable_datasets( A_ , A_ , A_ , info=A_ , split=A_ , stopping_strategy=A_ ) def __SCREAMING_SNAKE_CASE ( A_ , A_ = None , A_ = None , A_ = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(A_ ): if not isinstance(A_ , (Dataset, IterableDataset) ): if isinstance(A_ , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ' '''is an empty dataset dictionary.''' ) raise ValueError( f'Dataset at position {i} has at least one split: {list(A_ )}\n' f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A_ ) )}\']' ) raise ValueError( f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A_ ).__name__}.' ) if i == 0: lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(A_ , A_ ) else (IterableDataset, Dataset) ) elif not isinstance(A_ , A_ ): raise ValueError( f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' 
) if dataset_type is Dataset: return _concatenate_map_style_datasets(A_ , info=A_ , split=A_ , axis=A_ ) else: return _concatenate_iterable_datasets(A_ , info=A_ , split=A_ , axis=A_ )
code_codestyle: 106
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : Tuple = XGLMConfig __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Union[str, Any] = "gelu" def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = d_model __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = ffn_dim __lowercase = activation_function __lowercase = activation_dropout __lowercase = attention_dropout __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = None __lowercase = 0 __lowercase = 2 __lowercase = 1 def _lowercase ( self : Union[str, Any] ): return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Tuple ): __lowercase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = self.get_config() __lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self : List[Any] ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if 
is_tf_available() else () __UpperCAmelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def _lowercase ( self : Optional[Any] ): __lowercase = TFXGLMModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @slow def _lowercase ( self : List[str] ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self : int ): super().test_resize_token_embeddings() @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[Any] ): __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __lowercase = tokenizer("Today is a nice day and", return_tensors="tf" ) __lowercase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] ) __lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = "left" # use different length sentences to test batching __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ ) __lowercase = inputs["input_ids"] __lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
style_context_codestyle: 17
label: 0
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
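The basis functions are the Bernstein polynomials B(i, n)(t) = C(n, i) * (1 - t)^(n - i) * t^i. Evaluating the degree-2 curve from the demo at its midpoint makes an easy hand check (a sketch added here, not part of the original sample):

```python
curve = BezierCurve([(0, 0), (5, 5), (5, 0)])
print(curve.basis_function(0.5))         # Bernstein weights at t=0.5: [0.25, 0.5, 0.25]
print(curve.bezier_curve_function(0.5))  # (0.5*5 + 0.25*5, 0.5*5) = (3.75, 2.5)
```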
code_codestyle: 107
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
style_context_codestyle: 17
label: 0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Any ="gptj" a : Any ={ "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , snake_case__=50_400 , snake_case__=2_048 , snake_case__=4_096 , snake_case__=28 , snake_case__=16 , snake_case__=64 , snake_case__=None , snake_case__="gelu_new" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=50_256 , snake_case__=50_256 , snake_case__=False , **snake_case__ , ): """simple docstring""" lowerCAmelCase : Any = vocab_size lowerCAmelCase : Tuple = n_positions lowerCAmelCase : List[Any] = n_embd lowerCAmelCase : Any = n_layer lowerCAmelCase : List[Any] = n_head lowerCAmelCase : Optional[int] = n_inner lowerCAmelCase : List[str] = rotary_dim lowerCAmelCase : Dict = activation_function lowerCAmelCase : Dict = resid_pdrop lowerCAmelCase : List[Any] = embd_pdrop lowerCAmelCase : List[str] = attn_pdrop lowerCAmelCase : Optional[int] = layer_norm_epsilon lowerCAmelCase : Optional[int] = initializer_range lowerCAmelCase : int = use_cache lowerCAmelCase : Dict = bos_token_id lowerCAmelCase : List[str] = eos_token_id super().__init__( bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ ) class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None , snake_case__ = False , ): """simple docstring""" super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ ) if not getattr(self._config , "pad_token_id" , snake_case__ ): # TODO: how to do that better? 
lowerCAmelCase : Any = 0 @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(snake_case__ , direction="inputs" ) lowerCAmelCase : int = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase : Optional[int] = {0: "batch", 1: "sequence"} return common_inputs @property def lowercase__ ( self ): """simple docstring""" return self._config.n_layer @property def lowercase__ ( self ): """simple docstring""" return self._config.n_head def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Tuple = super(snake_case__ , self ).generate_dummy_inputs( snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase : List[str] = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase , lowerCAmelCase : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase : Dict = seqlen + 2 lowerCAmelCase : Dict = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase : Union[str, Any] = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] lowerCAmelCase : Optional[Any] = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase : Optional[Any] = ordered_inputs["attention_mask"].dtype lowerCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self ): """simple docstring""" return 13
code_codestyle: 108
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : int = "time_series_transformer" __UpperCAmelCase : Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ): # time series specific configuration __lowercase = prediction_length __lowercase = context_length or prediction_length __lowercase = distribution_output __lowercase = loss __lowercase = input_size __lowercase = num_time_features __lowercase = lags_sequence __lowercase = scaling __lowercase = num_dynamic_real_features __lowercase = num_static_real_features __lowercase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __lowercase = cardinality else: __lowercase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __lowercase = embedding_dimension else: __lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality] __lowercase = num_parallel_samples # Transformer architecture configuration __lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features __lowercase = d_model __lowercase = encoder_attention_heads __lowercase = decoder_attention_heads __lowercase = encoder_ffn_dim __lowercase = decoder_ffn_dim __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = activation_function __lowercase = init_std __lowercase = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ ) @property def _lowercase ( self : Optional[Any] ): 
return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
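# For reference, a minimal sketch (assuming the public `transformers` API and
# illustrative hyper-parameter values) of how the property above feeds the
# model's input width: feature_size = input_size * len(lags_sequence) +
# _number_of_features, where the trailing "* 2" accounts for the log1p(abs(loc))
# and log(scale) statistics appended per input variate.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3],
    num_time_features=2,
)
print(config.feature_size)  # 1 * 3 + (0 + 0 + 2 + 0 + 1 * 2) = 7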
17
0
"""simple docstring""" A: Dict = 8.314_4598 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if temperature < 0: raise Exception("""Temperature cannot be less than 0 K""" ) if molar_mass <= 0: raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example A: Dict = 3_0_0 A: Dict = 2_8 A: str = rms_speed_of_molecule(temperature, molar_mass) print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
109
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ): pass def _A ( UpperCamelCase_ : Union[str, Any]) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. _a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ): __lowercase = pipeline( "document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = INVOICE_URL __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) __lowercase = "What is the placebo?" __lowercase = [ { "image": load_image(UpperCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 ) self.assertEqual( UpperCAmelCase__, [ [ {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, ] ] * 3, ) @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" ) __lowercase = INVOICE_URL __lowercase = "How many cats are there?" __lowercase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) # We can optionnally pass directly the words and bounding boxes __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = [] __lowercase = [] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : List[str] ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2, ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Optional[Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Union[str, Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowercase ( self : List[Any] ): pass
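# A hedged usage sketch for the pipeline exercised in the tests above. It
# assumes the public `transformers` document-question-answering task and the
# `impira/layoutlm-document-qa` checkpoint used above; the image path is
# illustrative.
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# `image` may be a PIL.Image, a local path, or a URL; OCR runs via pytesseract
# unless `word_boxes` are passed explicitly, as several tests above do.
print(dqa(image="invoice.png", question="What is the invoice number?", top_k=2))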
17
0
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list, current_row_idx: int) -> list:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list,
    current_row: list,
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
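# A quick sanity check (not part of the original module): every entry of
# Pascal's triangle is a binomial coefficient, so both generators can be
# validated against math.comb.
import math


def check_against_binomials(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for n, row in enumerate(triangle):
        assert row == [math.comb(n, k) for k in range(n + 1)]
    assert generate_pascal_triangle_optimized(num_rows) == triangle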
110
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a = 2 class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict, *, # begin keyword-only arguments UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ): __lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCAmelCase__ ) __lowercase = len(self.symbols ) def __eq__( self : List[str], UpperCAmelCase__ : Dict ): return self.indices == other.indices def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ): return len(self.symbols ) def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ): return sym in self.indices @classmethod def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = cls() d.add_from_file(UpperCAmelCase__ ) return d def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ): if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(UpperCAmelCase__ ) self.count.append(UpperCAmelCase__ ) return idx def _lowercase ( self : Any, UpperCAmelCase__ : str ): return 0 def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): try: with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd: self.add_from_file(UpperCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(UpperCAmelCase__ ) for line in lines[indices_start_line:]: try: __lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase ,__lowercase = line.rsplit(" ", 1 ) else: __lowercase = False __lowercase = int(UpperCAmelCase__ ) __lowercase = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(UpperCAmelCase__ ) ) self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _A ( UpperCamelCase_ : int) -> str: '''simple docstring''' __lowercase = dict((re.sub(r"@@$", "", UpperCamelCase_), v) if k.endswith("@@") else (re.sub(r"$", "</w>", UpperCamelCase_), v) for k, v in d.items()) __lowercase = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowercase = d[k] # restore return da def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]: '''simple docstring''' if not os.path.exists(UpperCamelCase_): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""") os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_) print(F"""Writing results to {pytorch_dump_folder_path}""") # handle various types of models __lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""") __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = chkpt["cfg"]["model"] # dicts __lowercase = os.path.join(UpperCamelCase_, "dict.txt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {dict_file} does not exist!""") __lowercase = Dictionary.load(UpperCamelCase_) __lowercase = rewrite_dict_keys(src_dict.indices) __lowercase = len(UpperCamelCase_) __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"]) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # merges_file (bpecodes) __lowercase = os.path.join(UpperCamelCase_, "bpecodes") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""") __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(UpperCamelCase_, UpperCamelCase_) # model config __lowercase = os.path.join(UpperCamelCase_, "config.json") __lowercase = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # tokenizer config __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) __lowercase = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": 
None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # model __lowercase = chkpt["model"] # remove unneeded keys __lowercase = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase_, UpperCamelCase_) __lowercase = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): __lowercase = model_state_dict.pop(UpperCamelCase_) else: __lowercase = model_state_dict.pop(UpperCamelCase_) __lowercase = BioGptConfig.from_pretrained(UpperCamelCase_) __lowercase = BioGptForCausalLM(UpperCamelCase_) # check that it loads ok model_new.load_state_dict(UpperCamelCase_) # save __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) print(F"""Generating {pytorch_weights_dump_path}""") torch.save(UpperCamelCase_, UpperCamelCase_) print("Conversion is done!") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
17
0
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
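# For context, a minimal NumPy sketch of what `shift_tokens_right` computes:
# decoder inputs are the labels shifted one position to the right, with the
# decoder start token prepended and any -100 loss-masking restored to
# pad_token_id. (Sketch only; the real helper operates on JAX arrays.)
import numpy as np


def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)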
26
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase_ ( metaclass=__lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = ["keras_nlp"] def __init__( self : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ): requires_backends(self , ['keras_nlp'] )
315
"""simple docstring""" from math import sqrt def _A ( UpperCamelCase_ : int) -> int: '''simple docstring''' __lowercase = 0 for i in range(1, int(sqrt(UpperCamelCase_) + 1)): if n % i == 0 and i != sqrt(UpperCamelCase_): total += i + n // i elif i == sqrt(UpperCamelCase_): total += i return total - n def _A ( UpperCamelCase_ : int = 10000) -> int: '''simple docstring''' __lowercase = sum( i for i in range(1, UpperCamelCase_) if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
17
0
from __future__ import annotations


def mean(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
338
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a = _symbol_database.Default() _a = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) _a = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a = None _a = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a = 45 _a = 15_81 _a = 15_17 _a = 15_70 _a = 15_84 _a = 17_93 _a = 17_95 _a = 19_16 _a = 18_64 _a = 19_05 _a = 19_19 _a = 24_29 _a = 22_08 _a = 24_18 _a = 23_23 _a = 24_07 # @@protoc_insertion_point(module_scope)
17
0
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a__ : Optional[int] =16 a__ : List[str] =32 def lowercase__ ( __lowercase : Accelerator , __lowercase : int = 16 , __lowercase : str = "bert-base-cased" ) -> List[str]: """simple docstring""" __UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase_ ) __UpperCamelCase = load_dataset('glue' , 'mrpc' ) def tokenize_function(__lowercase : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase = datasets.map( UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCamelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__lowercase : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase_ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(UpperCamelCase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
__UpperCamelCase = DataLoader( tokenized_datasets['train'] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ ) __UpperCamelCase = DataLoader( tokenized_datasets['validation'] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ ) return train_dataloader, eval_dataloader def lowercase__ ( __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" __UpperCamelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['lr'] __UpperCamelCase = int(config['num_epochs'] ) __UpperCamelCase = int(config['seed'] ) __UpperCamelCase = int(config['batch_size'] ) __UpperCamelCase = args.model_name_or_path set_seed(UpperCamelCase_ ) __UpperCamelCase , __UpperCamelCase = get_dataloaders(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ , return_dict=UpperCamelCase_ ) # Instantiate optimizer __UpperCamelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase = optimizer_cls(params=model.parameters() , lr=UpperCamelCase_ ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: __UpperCamelCase = 1 __UpperCamelCase = (len(UpperCamelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=UpperCamelCase_ , num_warmup_steps=0 , num_training_steps=UpperCamelCase_ , ) else: __UpperCamelCase = DummyScheduler(UpperCamelCase_ , total_num_steps=UpperCamelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase = 0 # Now we train the model __UpperCamelCase = evaluate.load('glue' , 'mrpc' ) __UpperCamelCase = 0 __UpperCamelCase = {} for epoch in range(UpperCamelCase_ , UpperCamelCase_ ): model.train() for step, batch in enumerate(UpperCamelCase_ ): __UpperCamelCase = model(**UpperCamelCase_ ) __UpperCamelCase = outputs.loss __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase = 0 for step, batch in enumerate(UpperCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**UpperCamelCase_ ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase , __UpperCamelCase = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase_ ) - 1: __UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase_ , references=UpperCamelCase_ , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCamelCase_ ) __UpperCamelCase = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: __UpperCamelCase = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=UpperCamelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCamelCase_ , ) parser.add_argument( '--output_dir' , type=UpperCamelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=UpperCamelCase_ , default=UpperCamelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=UpperCamelCase_ , default=3 , help='Number of train epochs.' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(UpperCamelCase_ , UpperCamelCase_ ) if __name__ == "__main__": main()
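# The loop above divides each loss by `gradient_accumulation_steps` and only
# steps the optimizer every N batches. A minimal plain-PyTorch sketch of the
# same pattern (argument names are illustrative; note the script above steps
# on `step % N == 0`, while `(step + 1) % N == 0` is the more common phrasing):
def train_with_accumulation(model, optimizer, lr_scheduler, train_dataloader, gradient_accumulation_steps):
    model.train()
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss / gradient_accumulation_steps
        loss.backward()
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()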
53
"""simple docstring""" import baseaa def _A ( UpperCamelCase_ : str) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode("utf-8")) def _A ( UpperCamelCase_ : bytes) -> str: '''simple docstring''' return baseaa.baadecode(UpperCamelCase_).decode("utf-8") if __name__ == "__main__": _a = 'Hello World!' _a = baseaa_encode(test) print(encoded) _a = baseaa_decode(encoded) print(decoded)
17
0
class Graph:
    def __init__(self) -> None:
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited list, one flag per vertex
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
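# For comparison, an equivalent iterative DFS with an explicit stack, using
# the same adjacency-list `Graph.vertex` mapping defined above:
def dfs_iterative(graph: Graph, start: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        print(vertex, end=" ")
        # push neighbours in reverse so they are popped in insertion order
        stack.extend(reversed(graph.vertex.get(vertex, [])))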
59
"""simple docstring""" def _A ( UpperCamelCase_ : Any) -> List[str]: '''simple docstring''' __lowercase ,__lowercase = [], [] while len(UpperCamelCase_) > 1: __lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_) start.append(UpperCamelCase_) end.append(UpperCamelCase_) collection.remove(UpperCamelCase_) collection.remove(UpperCamelCase_) end.reverse() return start + collection + end if __name__ == "__main__": _a = input('Enter numbers separated by a comma:\n').strip() _a = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
17
0
from ..utils import DummyObject, requires_backends


class __lowercase(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
13
"""simple docstring""" def _A ( UpperCamelCase_ : list[int]) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty") __lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average return sum(abs(x - average) for x in nums) / len(UpperCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
17
0
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal A_ :str = datasets.utils.logging.get_logger(__name__) A_ :List[Any] = ['''names''', '''prefix'''] A_ :int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] A_ :Any = ['''encoding_errors''', '''on_bad_lines'''] A_ :Any = ['''date_format'''] @dataclass class __A ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase__ : str ="," UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[Union[int, List[int], str]] ="infer" UpperCamelCase__ : Optional[List[str]] =None UpperCamelCase__ : Optional[List[str]] =None UpperCamelCase__ : Optional[Union[int, str, List[int], List[str]]] =None UpperCamelCase__ : Optional[Union[List[int], List[str]]] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : bool =True UpperCamelCase__ : Optional[Literal["c", "python", "pyarrow"]] =None UpperCamelCase__ : Dict[Union[int, str], Callable[[Any], Any]] =None UpperCamelCase__ : Optional[list] =None UpperCamelCase__ : Optional[list] =None UpperCamelCase__ : bool =False UpperCamelCase__ : Optional[Union[int, List[int]]] =None UpperCamelCase__ : Optional[int] =None UpperCamelCase__ : Optional[Union[str, List[str]]] =None UpperCamelCase__ : bool =True UpperCamelCase__ : bool =True UpperCamelCase__ : bool =False UpperCamelCase__ : bool =True UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : str ="." UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : str ='"' UpperCamelCase__ : int =0 UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : bool =True UpperCamelCase__ : bool =True UpperCamelCase__ : int =0 UpperCamelCase__ : bool =True UpperCamelCase__ : bool =False UpperCamelCase__ : Optional[str] =None UpperCamelCase__ : int =1_0_0_0_0 UpperCamelCase__ : Optional[datasets.Features] =None UpperCamelCase__ : Optional[str] ="strict" UpperCamelCase__ : Literal["error", "warn", "skip"] ="error" UpperCamelCase__ : Optional[str] =None def __lowercase ( self ): """simple docstring""" if self.delimiter is not None: __UpperCamelCase : Optional[Any] =self.delimiter if self.column_names is not None: __UpperCamelCase : List[str] =self.column_names @property def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] ={ 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 
'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , UpperCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase__ : Tuple =CsvConfig def __lowercase ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __UpperCamelCase : Tuple =dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__ , (str, list, tuple) ): __UpperCamelCase : int =data_files if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __UpperCamelCase : int =[files] __UpperCamelCase : str =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] __UpperCamelCase : Optional[Any] =[] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __UpperCamelCase : Tuple =[files] __UpperCamelCase : Dict =[dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'files': files} ) ) return splits def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" if self.config.features is not None: __UpperCamelCase : Any =self.config.features.arrow_schema if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast __UpperCamelCase : Dict =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=UpperCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __UpperCamelCase : str =table_cast(UpperCAmelCase__ , UpperCAmelCase__ ) return pa_table def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : List[Any] =self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __UpperCamelCase : List[str] =( { name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): __UpperCamelCase : Tuple =pd.read_csv(UpperCAmelCase__ , iterator=UpperCAmelCase__ , 
dtype=UpperCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(UpperCAmelCase__ ): __UpperCamelCase : int =pa.Table.from_pandas(UpperCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(UpperCAmelCase__ )}: {e}' ) raise
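# A hedged usage sketch for the CSV builder above through the public
# `datasets` API; keyword arguments are forwarded into CsvConfig and from
# there into pandas.read_csv (the file name is illustrative):
from datasets import load_dataset

ds = load_dataset("csv", data_files="data.csv", sep=",", skiprows=0)
print(ds["train"].features)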
71
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ): __lowercase = parent __lowercase = vocab_size __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = (image_size // patch_size) ** 2 __lowercase = num_patches + 1 def _lowercase ( self : int ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) return config, pixel_values, labels def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ): __lowercase = FlaxBeitModel(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ): 
__lowercase = self.type_sequence_label_size __lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self : List[Any] ): __lowercase = FlaxBeitModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["pixel_values"] self.assertListEqual(arg_names[:1], UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ): self.assertEqual(jitted_output.shape, output.shape ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ ) def _A ( ) -> str: '''simple docstring''' __lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple 
docstring""" @cached_property def _lowercase ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values # prepare bool_masked_pos __lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ ) # forward pass __lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_9_6, 8_1_9_2) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) ) @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_0_0_0) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_8_1 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 2_1_8_4_1) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
17
0
'''simple docstring'''

from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.path_or_paths = path_or_paths
        # a single path (or list of paths) defaults to the "train" split;
        # a dict of paths already carries its own split names
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        '''simple docstring'''
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        '''simple docstring'''
        pass
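Concrete readers fill in `read()`. A hypothetical minimal subclass, just to illustrate the contract; `DictReader` is not part of the datasets library.

# Hypothetical concrete reader built on the abstract base above.
from datasets import Dataset

class DictReader(AbstractDatasetReader):
    def __init__(self, data: dict, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.data = data

    def read(self):
        # materialise an in-memory Dataset; the streaming/cache options are
        # ignored in this toy example
        return Dataset.from_dict(self.data)

reader = DictReader({"text": ["a", "b"], "label": [0, 1]})
print(reader.read())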
254
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase ,lowercase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase = load_tool("text-classification" ) self.tool.setup() __lowercase = load_tool("text-classification", remote=UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : str ): __lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : List[str] ): __lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : Tuple ): __lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" )
17
0
'''simple docstring''' from collections.abc import Generator from math import sin def __lowerCamelCase ( _lowercase ) -> bytes: if len(UpperCamelCase_ ) != 3_2: raise ValueError("""Input must be of length 32""" ) UpperCAmelCase : List[Any] = B"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __lowerCamelCase ( _lowercase ) -> bytes: if i < 0: raise ValueError("""Input must be non-negative""" ) UpperCAmelCase : Union[str, Any] = format(UpperCamelCase_ , """08x""" )[-8:] UpperCAmelCase : Optional[Any] = B"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def __lowerCamelCase ( _lowercase ) -> bytes: UpperCAmelCase : str = B"""""" for char in message: bit_string += format(UpperCamelCase_ , """08b""" ).encode("""utf-8""" ) UpperCAmelCase : int = format(len(UpperCamelCase_ ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCamelCase_ ) % 5_1_2 != 4_4_8: bit_string += b"0" bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] ) return bit_string def __lowerCamelCase ( _lowercase ) -> Generator[list[int], None, None]: if len(UpperCamelCase_ ) % 5_1_2 != 0: raise ValueError("""Input must have length that's a multiple of 512""" ) for pos in range(0 , len(UpperCamelCase_ ) , 5_1_2 ): UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 5_1_2] UpperCAmelCase : Tuple = [] for i in range(0 , 5_1_2 , 3_2 ): block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) ) yield block_words def __lowerCamelCase ( _lowercase ) -> int: if i < 0: raise ValueError("""Input must be non-negative""" ) UpperCAmelCase : List[Any] = format(UpperCamelCase_ , """032b""" ) UpperCAmelCase : Any = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCamelCase_ , 2 ) def __lowerCamelCase ( _lowercase , _lowercase ) -> int: return (a + b) % 2**3_2 def __lowerCamelCase ( _lowercase , _lowercase ) -> int: if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2 def __lowerCamelCase ( _lowercase ) -> bytes: UpperCAmelCase : int = preprocess(UpperCamelCase_ ) UpperCAmelCase : Any = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )] # Starting states UpperCAmelCase : Dict = 0x67_452_301 UpperCAmelCase : Optional[int] = 0xEF_CDA_B89 UpperCAmelCase : str = 0x98_BAD_CFE UpperCAmelCase : Optional[Any] = 0x10_325_476 UpperCAmelCase : int = [ 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCamelCase_ ): UpperCAmelCase : Optional[Any] = aa UpperCAmelCase : str = ba UpperCAmelCase : str = ca UpperCAmelCase : List[Any] = da # Hash current chunk for i in range(6_4 ): if i <= 1_5: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCAmelCase : Tuple = d ^ (b & (c ^ d)) UpperCAmelCase : Optional[Any] = i elif i <= 3_1: # f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCAmelCase : Union[str, Any] = c ^ (d & (b ^ c)) UpperCAmelCase : Tuple = (5 * i + 1) % 1_6 elif i <= 4_7: UpperCAmelCase : Any = b ^ c ^ d 
UpperCAmelCase : Tuple = (3 * i + 5) % 1_6 else: UpperCAmelCase : Union[str, Any] = c ^ (b | not_aa(UpperCamelCase_ )) UpperCAmelCase : Dict = (7 * i) % 1_6 UpperCAmelCase : Dict = (f + a + added_consts[i] + block_words[g]) % 2**3_2 UpperCAmelCase : Tuple = d UpperCAmelCase : Tuple = c UpperCAmelCase : Optional[int] = b UpperCAmelCase : Optional[Any] = sum_aa(UpperCamelCase_ , left_rotate_aa(UpperCamelCase_ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCAmelCase : Optional[Any] = sum_aa(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase : Tuple = sum_aa(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase : Optional[Any] = sum_aa(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase : int = sum_aa(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase : Any = reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ ) + reformat_hex(UpperCamelCase_ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
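The file above is a from-scratch MD5: bit-level padding, little-endian word packing, and the 64-round compression loop. The standard library provides a ready-made oracle for testing such an implementation; the digest-function name `md5_me` below is an assumption (the name used in the upstream algorithms collection), since the identifiers in the chunk are mangled.

# Reference oracle for a hand-rolled MD5.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
expected = hashlib.md5(message).hexdigest()
print(expected)  # 9e107d9d372bb6826bd81d3542a419d6
# with de-mangled names: assert md5_me(message) == expected.encode("utf-8")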
265
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _a = 'CompVis/stable-diffusion-v1-1' _a = 'CompVis/stable-diffusion-v1-2' _a = 'CompVis/stable-diffusion-v1-3' _a = 'CompVis/stable-diffusion-v1-4' class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ): super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def _lowercase ( self : List[str] ): return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )} def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): self.enable_attention_slicing(UpperCAmelCase__ ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, 
UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, 
UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): __lowercase = "cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
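This class corresponds to the "stable_diffusion_comparison" community pipeline upstream, which is loaded through the `custom_pipeline` argument. A loading sketch follows; whether the combined four-checkpoint method is exposed as `__call__` depends on the de-mangled method names, so the invocation below is an assumption.

# Loading sketch for the comparison pipeline above.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
)
pipe.enable_attention_slicing()
output = pipe(prompt="a photo of an astronaut riding a horse")  # assumed entry point
# output.images holds one image per checkpoint, v1.1 through v1.4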
17
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
221
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ): __lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) ) __lowercase = torch.manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowercase ( self : Optional[Any] ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : int ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : str ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) 
__lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Dict ): __lowercase = ort.SessionOptions() __lowercase = False return options def _lowercase ( self : Dict ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowercase ( self : str ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) __lowercase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] 
) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
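Outside the test harness, the upscaler is driven the same way the nightly tests above do it; a minimal CPU sketch with the same checkpoint and provider:

# Minimal CPU usage sketch for the ONNX x4 upscaler exercised above.
from diffusers import OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))
result = pipe(prompt="A fantasy landscape, trending on artstation", image=low_res, num_inference_steps=10)
result.images[0].save("upscaled.png")  # 512x512 output for a 128x128 input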
17
0
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
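The error of such Monte Carlo estimates shrinks roughly like 1/sqrt(n). A self-contained convergence check illustrating this:

# Convergence check for the Monte Carlo pi estimator above.
from math import pi, sqrt
from random import uniform

for n in (1_000, 10_000, 100_000):
    hits = sum(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0 for _ in range(n))
    estimate = 4 * hits / n
    print(f"n={n:>7}  pi~{estimate:.4f}  error={abs(pi - estimate):.4f}  1/sqrt(n)={1 / sqrt(n):.4f}")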
326
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _a = datasets.utils.logging.get_logger(__name__) _a = ['names', 'prefix'] _a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] _a = ['encoding_errors', 'on_bad_lines'] _a = ['date_format'] @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : str = "," __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer" __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None __UpperCAmelCase : Optional[Union[List[int], List[str]]] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None __UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[Union[int, List[int]]] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[Union[str, List[str]]] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = "." __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = '"' __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : int = 0 __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : int = 1_0_0_0_0 __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : Optional[str] = "strict" __UpperCAmelCase : Literal["error", "warn", "skip"] = "error" __UpperCAmelCase : Optional[str] = None def _lowercase ( self : Tuple ): if self.delimiter is not None: __lowercase = self.delimiter if self.column_names is not None: __lowercase = self.column_names @property def _lowercase ( self : Union[str, Any] ): __lowercase = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, 
"memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _lowerCAmelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCAmelCase : Tuple = CsvConfig def _lowercase ( self : List[str] ): return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __lowercase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__, (str, list, tuple) ): __lowercase = data_files if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )] __lowercase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) ) return splits def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ): if self.config.features is not None: __lowercase = self.config.features.arrow_schema if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast __lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ ) return pa_table def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ): __lowercase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __lowercase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): __lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(UpperCAmelCase__ ): __lowercase = pa.Table.from_pandas(UpperCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # 
logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" ) raise
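From the end-user side, this builder is what backs `load_dataset("csv", ...)`: the keyword arguments flow into `CsvConfig` and through to `pd.read_csv`. A short sketch; the file path is a placeholder.

# End-user view of the CSV builder above.
from datasets import load_dataset

ds = load_dataset(
    "csv",
    data_files={"train": "my_data.csv"},  # placeholder path
    sep=",",
    quotechar='"',
)
print(ds["train"].features)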
17
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase ( UpperCamelCase__ ): def __init__( self , *_a , _a=None , _a=None , **_a ) -> Union[str, Any]: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) _A : Union[str, Any] = eval_examples _A : List[str] = post_process_function def a__ ( self , _a = None , _a=None , _a = None , _a = "eval" , **_a , ) -> str: _A : Any = gen_kwargs.copy() _A : Tuple = ( gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length ) _A : int = ( gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams ) _A : List[str] = gen_kwargs _A : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset _A : List[str] = self.get_eval_dataloader(UpperCAmelCase__ ) _A : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A : Dict = self.compute_metrics _A : List[str] = None _A : List[Any] = time.time() _A : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A : Any = eval_loop( UpperCAmelCase__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: _A : Optional[int] = compute_metrics _A : Any = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A : Tuple = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _A : Tuple = self.compute_metrics(UpperCAmelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A : Any = metrics.pop(UpperCAmelCase__ ) metrics.update(output.metrics ) else: _A : str = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCAmelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ ) return metrics def a__ ( self , _a , _a , _a=None , _a = "test" , **_a ) -> Optional[Any]: _A : int = gen_kwargs.copy() _A : int = self.get_test_dataloader(UpperCAmelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. 
_A : Optional[Any] = self.compute_metrics _A : Optional[Any] = None _A : List[str] = time.time() _A : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: _A : Optional[int] = eval_loop( UpperCAmelCase__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: _A : Union[str, Any] = compute_metrics _A : Optional[int] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A : Optional[int] = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , """predict""" ) _A : Union[str, Any] = self.compute_metrics(UpperCAmelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A : Optional[int] = metrics.pop(UpperCAmelCase__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
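Both methods above report throughput through the `speed_metrics` helper from `transformers.trainer_utils`. A standalone call shows the keys it produces for a given metric prefix:

# Standalone look at the speed_metrics helper used by the trainer subclass above.
import time

from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for an evaluation or prediction loop
print(speed_metrics("eval", start, num_samples=32, num_steps=8))
# -> {'eval_runtime': ..., 'eval_samples_per_second': ..., 'eval_steps_per_second': ...}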
26
"""simple docstring""" from scipy.stats import spearmanr import datasets _a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' _a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' _a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr is symmetric in its two inputs, so argument order is immaterial
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
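The metric is a thin wrapper around scipy; the raw call it delegates to, with the same values as the docstring example:

# The scipy call underlying the metric above.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2), round(pvalue, 2))  # -0.7 0.19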
17
0
"""simple docstring""" a = { 0: '''0''', 1: '''1''', 2: '''2''', 3: '''3''', 4: '''4''', 5: '''5''', 6: '''6''', 7: '''7''', 8: '''8''', 9: '''9''', 10: '''a''', 11: '''b''', 12: '''c''', 13: '''d''', 14: '''e''', 15: '''f''', } def _snake_case ( _snake_case : float ) -> str: '''simple docstring''' assert type(UpperCamelCase_ ) in (int, float) and decimal == int(UpperCamelCase_ ) _A = int(UpperCamelCase_ ) _A = '' _A = False if decimal < 0: _A = True decimal *= -1 while decimal > 0: _A , _A = divmod(UpperCamelCase_ , 16 ) _A = values[remainder] + hexadecimal _A = '0x' + hexadecimal if negative: _A = '-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
315
"""simple docstring""" from collections.abc import Sequence def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_)) def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' __lowercase = 0.0 for coeff in reversed(UpperCamelCase_): __lowercase = result * x + coeff return result if __name__ == "__main__": _a = (0.0, 0.0, 5.0, 9.3, 7.0) _a = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
17
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
338
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowerCAmelCase ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[Any], UpperCAmelCase__ : str ): super().__init__() __lowercase = model __lowercase = 2 __lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels ) def _lowercase ( self : Optional[int] ): pass def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = LongformerModel.from_pretrained(UpperCamelCase_) __lowercase = LightningModel(UpperCamelCase_) __lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu")) lightning_model.load_state_dict(ckpt["state_dict"]) # init longformer question answering model __lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCamelCase_) print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
17
0
'''simple docstring''' from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar a__ : Dict =TypeVar('''T''') class snake_case ( Generic[T] ): """simple docstring""" def __init__( self : Any , __A : bool = True ): __UpperCamelCase = {} # dictionary of lists __UpperCamelCase = directed def _lowerCamelCase ( self : Dict , __A : T , __A : T ): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) self.adj_list[destination_vertex].append(UpperCAmelCase__ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) __UpperCamelCase = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(UpperCAmelCase__ ) __UpperCamelCase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: __UpperCamelCase = [destination_vertex] __UpperCamelCase = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(UpperCAmelCase__ ) __UpperCamelCase = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: __UpperCamelCase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: __UpperCamelCase = [destination_vertex] __UpperCamelCase = [] return self def __repr__( self : Any ): return pformat(self.adj_list )
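The identifiers in the chunk above are mangled, so this standalone sketch reproduces the intended behaviour with readable names: an undirected edge updates both adjacency lists, and `dict.setdefault` collapses the four membership cases the original handles explicitly.

# Compact adjacency-list graph equivalent to the class above.
from pprint import pformat

class GraphAdjacencyList:
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict = {}
        self.directed = directed

    def add_edge(self, source, destination):
        # ensure both vertices exist, then record the edge(s)
        self.adj_list.setdefault(source, []).append(destination)
        self.adj_list.setdefault(destination, [])
        if not self.directed:
            self.adj_list[destination].append(source)
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)

g = GraphAdjacencyList(directed=False)
g.add_edge(1, 2).add_edge(2, 3)
print(g)  # {1: [2], 2: [1, 3], 3: [2]}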
53
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, ) assert hasattr(self, "env" ) def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ): # configuration for running training on smdistributed Model Parallel __lowercase = { "enabled": True, "processes_per_host": 8, } __lowercase = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } __lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} __lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 5_0_0, }, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", ) def _lowercase ( self : Tuple, UpperCAmelCase__ : int ): TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ): # create estimator __lowercase = self.create_estimator(UpperCAmelCase__ ) # run training estimator.fit() # result dataframe __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests 
result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
17
0
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


__lowerCamelCase = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)

__lowerCamelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name


def UpperCamelCase ( ):
    snake_case : Optional[Any] = "https://pypi.org/pypi/diffusers/json"
    snake_case : Optional[int] = json.loads(request.urlopen(UpperCamelCase_ ).read() )["releases"].keys()
    return sorted(UpperCamelCase_ , key=lambda __lowerCamelCase : version.Version(UpperCamelCase_ ) )


def UpperCamelCase ( ):
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(UpperCamelCase_ )

    os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
    snake_case : Union[str, Any] = Path(UpperCamelCase_ ) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def UpperCamelCase ( __lowerCamelCase : Union[str, os.PathLike] ):
    init_hf_modules()
    snake_case : List[Any] = Path(UpperCamelCase_ ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
    snake_case : Any = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def UpperCamelCase ( __lowerCamelCase : Dict ):
    with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
        snake_case : Union[str, Any] = f.read()

    # Imports of the form `import .xxx`
    snake_case : Union[str, Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , UpperCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , UpperCamelCase_ , flags=re.MULTILINE )
    # Unique-ify
    return list(set(UpperCamelCase_ ) )


def UpperCamelCase ( __lowerCamelCase : List[str] ):
    snake_case : List[Any] = False
    snake_case : str = [module_file]
    snake_case : str = []

    # Let's recurse through all relative imports
    while not no_change:
        snake_case : str = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(UpperCamelCase_ ) )

        snake_case : str = Path(UpperCamelCase_ ).parent
        snake_case : List[str] = [str(module_path / m ) for m in new_imports]
        snake_case : int = [f for f in new_import_files if f not in all_relative_imports]
        snake_case : Dict = [f"""{f}.py""" for f in new_import_files]

        snake_case : int = len(UpperCamelCase_ ) == 0
        all_relative_imports.extend(UpperCamelCase_ )

    return all_relative_imports


def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
    with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
        snake_case : List[Any] = f.read()

    # Imports of the form `import xxx`
    snake_case : Any = re.findall("^\s*import\s+(\S+)\s*$" , UpperCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import" , UpperCamelCase_ , flags=re.MULTILINE )
    # Only keep the top-level module
    snake_case : Tuple = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]

    # Unique-ify and test we got them all
    snake_case : Optional[Any] = list(set(UpperCamelCase_ ) )
    snake_case : Any = []
    for imp in imports:
        try:
            importlib.import_module(UpperCamelCase_ )
        except ImportError:
            missing_packages.append(UpperCamelCase_ )

    if len(UpperCamelCase_ ) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"""{', '.join(UpperCamelCase_ )}. Run `pip install {' '.join(UpperCamelCase_ )}`"""
        )

    return get_relative_imports(UpperCamelCase_ )


def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int ):
    snake_case : Tuple = module_path.replace(os.path.sep , "." )
    snake_case : int = importlib.import_module(UpperCamelCase_ )

    if class_name is None:
        return find_pipeline_class(UpperCamelCase_ )
    return getattr(UpperCamelCase_ , UpperCamelCase_ )


def UpperCamelCase ( __lowerCamelCase : int ):
    from ..pipelines import DiffusionPipeline

    snake_case : Tuple = dict(inspect.getmembers(UpperCamelCase_ , inspect.isclass ) )
    snake_case : List[str] = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , UpperCamelCase_ )
            and cls.__module__.split("." )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}."""
                )
            snake_case : List[str] = cls

    return pipeline_class


def UpperCamelCase ( __lowerCamelCase : Union[str, os.PathLike] , __lowerCamelCase : str , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[Dict[str, str]] = None , __lowerCamelCase : Optional[Union[bool, str]] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : bool = False , ):
    snake_case : Optional[int] = str(UpperCamelCase_ )
    snake_case : Any = os.path.join(UpperCamelCase_ , UpperCamelCase_ )

    if os.path.isfile(UpperCamelCase_ ):
        snake_case : List[str] = module_file_or_url
        snake_case : str = "local"
    elif pretrained_model_name_or_path.count("/" ) == 0:
        snake_case : Optional[int] = get_diffusers_versions()
        # cut ".dev0"
        snake_case : Tuple = "v" + ".".join(__version__.split("." )[:3] )

        # retrieve github version that matches
        if revision is None:
            snake_case : Union[str, Any] = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"""Defaulting to latest_version: {revision}.""" )
        elif revision in available_versions:
            snake_case : Union[str, Any] = f"""v{revision}"""
        elif revision == "main":
            snake_case : int = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'] )}."""
            )

        # community pipeline on GitHub
        snake_case : Optional[int] = COMMUNITY_PIPELINES_URL.format(revision=UpperCamelCase_ , pipeline=UpperCamelCase_ )
        try:
            snake_case : Any = cached_download(
                UpperCamelCase_ ,
                cache_dir=UpperCamelCase_ ,
                force_download=UpperCamelCase_ ,
                proxies=UpperCamelCase_ ,
                resume_download=UpperCamelCase_ ,
                local_files_only=UpperCamelCase_ ,
                use_auth_token=UpperCamelCase_ ,
            )
            snake_case : str = "git"
            snake_case : Optional[Any] = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            snake_case : List[str] = hf_hub_download(
                UpperCamelCase_ ,
                UpperCamelCase_ ,
                cache_dir=UpperCamelCase_ ,
                force_download=UpperCamelCase_ ,
                proxies=UpperCamelCase_ ,
                resume_download=UpperCamelCase_ ,
                local_files_only=UpperCamelCase_ ,
                use_auth_token=UpperCamelCase_ ,
            )
            snake_case : str = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise

    # Check we have all the requirements in our environment
    snake_case : List[str] = check_imports(UpperCamelCase_ )

    # Now we move the module inside our cached dynamic modules.
    snake_case : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(UpperCamelCase_ )
    snake_case : Any = Path(UpperCamelCase_ ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(UpperCamelCase_ , submodule_path / module_file )
        for module_needed in modules_needed:
            snake_case : Optional[int] = f"""{module_needed}.py"""
            shutil.copy(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            snake_case : List[str] = use_auth_token
        elif use_auth_token is True:
            snake_case : Union[str, Any] = HfFolder.get_token()
        else:
            snake_case : List[Any] = None

        snake_case : Optional[Any] = model_info(UpperCamelCase_ , revision=UpperCamelCase_ , token=UpperCamelCase_ ).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        snake_case : int = submodule_path / commit_hash
        snake_case : Optional[Any] = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(UpperCamelCase_ )

        if not (submodule_path / module_file).exists():
            shutil.copy(UpperCamelCase_ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    UpperCamelCase_ ,
                    f"""{module_needed}.py""" ,
                    cache_dir=UpperCamelCase_ ,
                    force_download=UpperCamelCase_ ,
                    resume_download=UpperCamelCase_ ,
                    proxies=UpperCamelCase_ ,
                    use_auth_token=UpperCamelCase_ ,
                    revision=UpperCamelCase_ ,
                    local_files_only=UpperCamelCase_ ,
                )
    return os.path.join(UpperCamelCase_ , UpperCamelCase_ )


def UpperCamelCase ( __lowerCamelCase : Union[str, os.PathLike] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[Dict[str, str]] = None , __lowerCamelCase : Optional[Union[bool, str]] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : bool = False , **__lowerCamelCase : List[str] , ):
    snake_case : List[str] = get_cached_module_file(
        UpperCamelCase_ ,
        UpperCamelCase_ ,
        cache_dir=UpperCamelCase_ ,
        force_download=UpperCamelCase_ ,
        resume_download=UpperCamelCase_ ,
        proxies=UpperCamelCase_ ,
        use_auth_token=UpperCamelCase_ ,
        revision=UpperCamelCase_ ,
        local_files_only=UpperCamelCase_ ,
    )

    return get_class_in_module(UpperCamelCase_ , final_module.replace(".py" , "" ) )
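De-mangled, these helpers mirror diffusers' dynamic-module utilities: fetch a pipeline file (a local path, a community file on GitHub, or a Hub repo), copy it and its relative imports into the dynamic-modules cache, then import and return the class. A hedged usage sketch against the real diffusers API these were derived from (the import path, pipeline name, and keyword names are assumptions based on that API, not on the obfuscated code):

from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module  # assumed location

pipeline_class = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion",               # community pipeline name (illustrative)
    module_file="clip_guided_stable_diffusion.py",
    class_name=None,  # None lets find_pipeline_class() pick the unique DiffusionPipeline subclass
)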
59
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
17
0
from numpy import exp, pi, sqrt


def A_ ( _UpperCAmelCase , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1.0 ):
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
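With the placeholder parameter names restored from the body (`x`, `mu`, `sigma` — an assumption, since the mangled signature reuses one name for all three arguments), this is the Gaussian density f(x) = exp(-(x - mu)^2 / (2 sigma^2)) / sqrt(2 pi sigma^2). A quick sanity check:

from numpy import exp, pi, sqrt

def gaussian(x, mu=0.0, sigma=1.0):
    # density of N(mu, sigma**2)
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))

print(gaussian(0.0))  # 1/sqrt(2*pi) ≈ 0.3989422804014327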
13
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( 
feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, 
keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) 
and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
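The `slerp` helper buried in the mangled code above is worth spelling out: it interpolates two embeddings along the unit sphere, falling back to plain linear interpolation when the vectors are near-parallel (where arccos is numerically unstable). A readable sketch of the same logic (the names are mine; the original's identifiers are obfuscated):

import numpy as np

def slerp(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    # cosine of the angle between v0 and v1
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        return (1 - t) * v0 + t * v1  # nearly parallel: lerp is fine
    theta = np.arccos(dot)            # angle between the two vectors
    sin_theta = np.sin(theta)
    s0 = np.sin(theta - t * theta) / sin_theta
    s1 = np.sin(t * theta) / sin_theta
    return s0 * v0 + s1 * v1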
17
0
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

A_ :Union[str, Any] = logging.getLogger(__name__)


@dataclass
class __A :
    """simple docstring"""

    UpperCamelCase__ : str =field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    UpperCamelCase__ : Optional[str] =field(
        default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    UpperCamelCase__ : Optional[str] =field(
        default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    UpperCamelCase__ : Optional[str] =field(
        default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    UpperCamelCase__ : bool =field(
        default=a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    UpperCamelCase__ : str =field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    UpperCamelCase__ : bool =field(
        default=a , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )


@dataclass
class __A :
    """simple docstring"""

    UpperCamelCase__ : Optional[str] =field(default=a , metadata={"""help""": """The input training data file (a text file)."""} )
    UpperCamelCase__ : Optional[str] =field(
        default=a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    UpperCamelCase__ : bool =field(
        default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    UpperCamelCase__ : Optional[int] =field(
        default=a , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    UpperCamelCase__ : Optional[int] =field(
        default=a , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    UpperCamelCase__ : bool =field(
        default=a , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    UpperCamelCase__ : Optional[int] =field(
        default=a , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    UpperCamelCase__ : Optional[int] =field(
        default=a , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )

    def __lowercase ( self ):
        """simple docstring"""
        if self.train_file is not None:
            __UpperCamelCase : Any =self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            __UpperCamelCase : int =self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class __A :
    """simple docstring"""

    UpperCamelCase__ : PreTrainedTokenizerBase
    UpperCamelCase__ : Union[bool, str, PaddingStrategy] =True
    UpperCamelCase__ : Optional[int] =None
    UpperCamelCase__ : Optional[int] =None

    def __call__( self , lowerCamelCase__ ):
        """simple docstring"""
        __UpperCamelCase : Any ='label' if 'label' in features[0].keys() else 'labels'
        __UpperCamelCase : List[str] =[feature.pop(UpperCAmelCase__ ) for feature in features]
        __UpperCamelCase : Optional[Any] =len(UpperCAmelCase__ )
        __UpperCamelCase : Dict =len(features[0]['input_ids'] )
        __UpperCamelCase : int =[
            [{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
        ]
        __UpperCamelCase : List[str] =list(chain(*UpperCAmelCase__ ) )
        __UpperCamelCase : Dict =self.tokenizer.pad(
            UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )

        # Un-flatten
        __UpperCamelCase : Optional[int] ={k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
        # Add back labels
        __UpperCamelCase : int =torch.tensor(UpperCAmelCase__ , dtype=torch.intaa )
        return batch


def A ( ) -> int:
    __UpperCamelCase : Optional[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : int =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] =parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' ,UpperCamelCase_ ,UpperCamelCase_ )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    __UpperCamelCase : int =training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase_ )
    datasets.utils.logging.set_verbosity(UpperCamelCase_ )
    transformers.utils.logging.set_verbosity(UpperCamelCase_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    __UpperCamelCase : Optional[int] =None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __UpperCamelCase : int =get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        __UpperCamelCase : Any ={}
        if data_args.train_file is not None:
            __UpperCamelCase : int =data_args.train_file
        if data_args.validation_file is not None:
            __UpperCamelCase : Dict =data_args.validation_file
        __UpperCamelCase : Union[str, Any] =data_args.train_file.split('.' )[-1]
        __UpperCamelCase : Dict =load_dataset(
            UpperCamelCase_ ,data_files=UpperCamelCase_ ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    else:
        # Downloading and loading the swag dataset from the hub.
        __UpperCamelCase : int =load_dataset(
            'swag' ,'regular' ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __UpperCamelCase : str =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    __UpperCamelCase : Tuple =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    __UpperCamelCase : List[str] =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=UpperCamelCase_ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    __UpperCamelCase : Optional[int] =[F'ending{i}' for i in range(4 )]
    __UpperCamelCase : str ='sent1'
    __UpperCamelCase : Tuple ='sent2'

    if data_args.max_seq_length is None:
        __UpperCamelCase : List[str] =tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            __UpperCamelCase : Any =1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        __UpperCamelCase : Optional[Any] =min(data_args.max_seq_length ,tokenizer.model_max_length )

    # Preprocessing the datasets.
    def preprocess_function(a_ ):
        __UpperCamelCase : Dict =[[context] * 4 for context in examples[context_name]]
        __UpperCamelCase : int =examples[question_header_name]
        __UpperCamelCase : Tuple =[
            [F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(UpperCamelCase_ )
        ]

        # Flatten out
        __UpperCamelCase : int =list(chain(*UpperCamelCase_ ) )
        __UpperCamelCase : List[Any] =list(chain(*UpperCamelCase_ ) )

        # Tokenize
        __UpperCamelCase : Optional[int] =tokenizer(
            UpperCamelCase_ ,UpperCamelCase_ ,truncation=UpperCamelCase_ ,max_length=UpperCamelCase_ ,padding='max_length' if data_args.pad_to_max_length else False ,)
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 ,len(UpperCamelCase_ ) ,4 )] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        __UpperCamelCase : List[Any] =raw_datasets['train']
        if data_args.max_train_samples is not None:
            __UpperCamelCase : Any =min(len(UpperCamelCase_ ) ,data_args.max_train_samples )
            __UpperCamelCase : Union[str, Any] =train_dataset.select(range(UpperCamelCase_ ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            __UpperCamelCase : Tuple =train_dataset.map(
                UpperCamelCase_ ,batched=UpperCamelCase_ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        __UpperCamelCase : str =raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            __UpperCamelCase : Optional[Any] =min(len(UpperCamelCase_ ) ,data_args.max_eval_samples )
            __UpperCamelCase : Any =eval_dataset.select(range(UpperCamelCase_ ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            __UpperCamelCase : Tuple =eval_dataset.map(
                UpperCamelCase_ ,batched=UpperCamelCase_ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)

    # Data collator
    __UpperCamelCase : Union[str, Any] =(
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase_ ,pad_to_multiple_of=8 if training_args.fpaa else None )
    )

    # Metric
    def compute_metrics(a_ ):
        __UpperCamelCase , __UpperCamelCase : Union[str, Any] =eval_predictions
        __UpperCamelCase : Optional[int] =np.argmax(UpperCamelCase_ ,axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}

    # Initialize our Trainer
    __UpperCamelCase : Any =Trainer(
        model=UpperCamelCase_ ,args=UpperCamelCase_ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=UpperCamelCase_ ,data_collator=UpperCamelCase_ ,compute_metrics=UpperCamelCase_ ,)

    # Training
    if training_args.do_train:
        __UpperCamelCase : List[str] =None
        if training_args.resume_from_checkpoint is not None:
            __UpperCamelCase : str =training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __UpperCamelCase : Tuple =last_checkpoint
        __UpperCamelCase : Union[str, Any] =trainer.train(resume_from_checkpoint=UpperCamelCase_ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        __UpperCamelCase : Union[str, Any] =train_result.metrics
        __UpperCamelCase : str =(
            data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
        )
        __UpperCamelCase : Tuple =min(UpperCamelCase_ ,len(UpperCamelCase_ ) )

        trainer.log_metrics('train' ,UpperCamelCase_ )
        trainer.save_metrics('train' ,UpperCamelCase_ )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        __UpperCamelCase : Optional[int] =trainer.evaluate()
        __UpperCamelCase : Dict =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase_ )
        __UpperCamelCase : int =min(UpperCamelCase_ ,len(UpperCamelCase_ ) )
        trainer.log_metrics('eval' ,UpperCamelCase_ )
        trainer.save_metrics('eval' ,UpperCamelCase_ )

    __UpperCamelCase : List[str] ={
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase_ )
    else:
        trainer.create_model_card(**UpperCamelCase_ )


def A ( a_ ) -> Optional[int]:
    main()


if __name__ == "__main__":
    main()
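The trickiest part of the script above is the flatten/un-flatten dance shared by `preprocess_function` and the multiple-choice collator: each SWAG example becomes four (context, ending) pairs, which are tokenized as one flat batch and then reshaped back to `(batch_size, num_choices, seq_len)`. In miniature, with illustrative names and shapes only:

from itertools import chain

batch_size, num_choices = 2, 4
contexts = [["ctx-%d" % i] * num_choices for i in range(batch_size)]  # shape (2, 4)
flat = list(chain(*contexts))                                         # 8 strings, tokenized as one batch
unflat = [flat[i : i + num_choices] for i in range(0, len(flat), num_choices)]  # back to (2, 4)
assert len(unflat) == batch_size and len(unflat[0]) == num_choices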
71
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : Tuple = XGLMConfig __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Union[str, Any] = "gelu" def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = d_model __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = ffn_dim __lowercase = activation_function __lowercase = activation_dropout __lowercase = attention_dropout __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = None __lowercase = 0 __lowercase = 2 __lowercase = 1 def _lowercase ( self : Union[str, Any] ): return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Tuple ): __lowercase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = self.get_config() __lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self : List[Any] ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if 
is_tf_available() else () __UpperCAmelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def _lowercase ( self : Optional[Any] ): __lowercase = TFXGLMModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @slow def _lowercase ( self : List[str] ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self : int ): super().test_resize_token_embeddings() @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[Any] ): __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __lowercase = tokenizer("Today is a nice day and", return_tensors="tf" ) __lowercase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] ) __lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = "left" # use different length sentences to test batching __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ ) __lowercase = inputs["input_ids"] __lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
17
0
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class _A ( unittest.TestCase ):

    def __A ( self ) -> Optional[int]:
        '''simple docstring'''
        __UpperCAmelCase : List[str] = tempfile.mkdtemp()

        __UpperCAmelCase : Tuple = BlipImageProcessor()
        __UpperCAmelCase : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )

        __UpperCAmelCase : Optional[int] = BlipProcessor(UpperCAmelCase__ , UpperCAmelCase__ )

        processor.save_pretrained(self.tmpdirname )

    def __A ( self , **__UpperCAmelCase ) -> Union[str, Any]:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).tokenizer

    def __A ( self , **__UpperCAmelCase ) -> Any:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor

    def __A ( self ) -> Any:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def __A ( self ) -> List[str]:
        '''simple docstring'''
        __UpperCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]

        __UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def __A ( self ) -> List[str]:
        '''simple docstring'''
        __UpperCAmelCase : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __UpperCAmelCase : Dict = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )

        __UpperCAmelCase : Union[str, Any] = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )

    def __A ( self ) -> str:
        '''simple docstring'''
        __UpperCAmelCase : Tuple = self.get_image_processor()
        __UpperCAmelCase : List[Any] = self.get_tokenizer()

        __UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )

        __UpperCAmelCase : Optional[int] = self.prepare_image_inputs()

        __UpperCAmelCase : Dict = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
        __UpperCAmelCase : Any = processor(images=UpperCAmelCase__ , return_tensors="""np""" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def __A ( self ) -> Any:
        '''simple docstring'''
        __UpperCAmelCase : int = self.get_image_processor()
        __UpperCAmelCase : Dict = self.get_tokenizer()

        __UpperCAmelCase : str = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )

        __UpperCAmelCase : Any = """lower newer"""

        __UpperCAmelCase : Optional[int] = processor(text=UpperCAmelCase__ )

        __UpperCAmelCase : Any = tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def __A ( self ) -> List[Any]:
        '''simple docstring'''
        __UpperCAmelCase : Optional[int] = self.get_image_processor()
        __UpperCAmelCase : Any = self.get_tokenizer()

        __UpperCAmelCase : Any = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )

        __UpperCAmelCase : Optional[int] = """lower newer"""
        __UpperCAmelCase : int = self.prepare_image_inputs()

        __UpperCAmelCase : List[str] = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )

        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )

        # test if it raises when no input is passed
        with pytest.raises(UpperCAmelCase__ ):
            processor()

    def __A ( self ) -> Dict:
        '''simple docstring'''
        __UpperCAmelCase : str = self.get_image_processor()
        __UpperCAmelCase : List[Any] = self.get_tokenizer()

        __UpperCAmelCase : Any = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )

        __UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __UpperCAmelCase : List[Any] = processor.batch_decode(UpperCAmelCase__ )
        __UpperCAmelCase : str = tokenizer.batch_decode(UpperCAmelCase__ )

        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    def __A ( self ) -> List[Any]:
        '''simple docstring'''
        __UpperCAmelCase : List[Any] = self.get_image_processor()
        __UpperCAmelCase : Union[str, Any] = self.get_tokenizer()

        __UpperCAmelCase : List[str] = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )

        __UpperCAmelCase : Tuple = """lower newer"""
        __UpperCAmelCase : Optional[int] = self.prepare_image_inputs()

        __UpperCAmelCase : str = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
254
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
17
0
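A quick usage sketch for the processor pattern exercised by the test file above; the checkpoint name is illustrative, and any BLIP repo that ships a saved processor should behave the same way.

import numpy as np
from PIL import Image

from transformers import BlipProcessor

# Illustrative checkpoint choice; swap in any BLIP repo with a processor.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']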
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
265
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : int = "time_series_transformer" __UpperCAmelCase : Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ): # time series specific configuration __lowercase = prediction_length __lowercase = context_length or prediction_length __lowercase = distribution_output __lowercase = loss __lowercase = input_size __lowercase = num_time_features __lowercase = lags_sequence __lowercase = scaling __lowercase = num_dynamic_real_features __lowercase = num_static_real_features __lowercase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __lowercase = cardinality else: __lowercase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __lowercase = embedding_dimension else: __lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality] __lowercase = num_parallel_samples # Transformer architecture configuration __lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features __lowercase = d_model __lowercase = encoder_attention_heads __lowercase = decoder_attention_heads __lowercase = encoder_ffn_dim __lowercase = decoder_ffn_dim __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = activation_function __lowercase = init_std __lowercase = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ ) @property def _lowercase ( self : Optional[Any] ): 
return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
17
0
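A minimal sketch of instantiating the MoE configuration defined above; the overridden values are arbitrary and only meant to show that `attribute_map` redirects the generic attribute names.

from transformers import NllbMoeConfig

config = NllbMoeConfig(encoder_layers=2, decoder_layers=2, num_experts=4, expert_capacity=8)
# attribute_map routes these reads to d_model / encoder_attention_heads
print(config.hidden_size, config.num_attention_heads)  # 1024 16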
"""simple docstring""" from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class UpperCamelCase__( __A ): lowerCAmelCase__ : Union[str, Any] = ["image_processor"] lowerCAmelCase__ : Optional[Any] = "SamImageProcessor" def __init__( self ,__UpperCAmelCase ) -> Dict: super().__init__(UpperCAmelCase__ ) A__ = self.image_processor A__ = -10 A__ = self.image_processor.size['longest_edge'] def __call__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> str: A__ = self.image_processor( UpperCAmelCase__ ,return_tensors=UpperCAmelCase__ ,**UpperCAmelCase__ ,) # pop arguments that are not used in the foward but used nevertheless A__ = encoding_image_processor['original_sizes'] if hasattr(UpperCAmelCase__ ,'numpy' ): # Checks if Torch or TF tensor A__ = original_sizes.numpy() A__ , A__ , A__ = self._check_and_preprocess_points( input_points=UpperCAmelCase__ ,input_labels=UpperCAmelCase__ ,input_boxes=UpperCAmelCase__ ,) A__ = self._normalize_and_convert( UpperCAmelCase__ ,UpperCAmelCase__ ,input_points=UpperCAmelCase__ ,input_labels=UpperCAmelCase__ ,input_boxes=UpperCAmelCase__ ,return_tensors=UpperCAmelCase__ ,) return encoding_image_processor def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="pt" ,) -> List[str]: if input_points is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): A__ = [ self._normalize_coordinates(self.target_size ,UpperCAmelCase__ ,original_sizes[0] ) for point in input_points ] else: A__ = [ self._normalize_coordinates(self.target_size ,UpperCAmelCase__ ,UpperCAmelCase__ ) for point, original_size in zip(UpperCAmelCase__ ,UpperCAmelCase__ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: A__ , A__ = self._pad_points_and_labels(UpperCAmelCase__ ,UpperCAmelCase__ ) A__ = np.array(UpperCAmelCase__ ) if input_labels is not None: A__ = np.array(UpperCAmelCase__ ) if input_boxes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): A__ = [ self._normalize_coordinates(self.target_size ,UpperCAmelCase__ ,original_sizes[0] ,is_bounding_box=UpperCAmelCase__ ) for box in input_boxes ] else: A__ = [ self._normalize_coordinates(self.target_size ,UpperCAmelCase__ ,UpperCAmelCase__ ,is_bounding_box=UpperCAmelCase__ ) for box, original_size in zip(UpperCAmelCase__ ,UpperCAmelCase__ ) ] A__ = np.array(UpperCAmelCase__ ) if input_boxes is not None: if return_tensors == "pt": A__ = torch.from_numpy(UpperCAmelCase__ ) # boxes batch size of 1 by default A__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": A__ = tf.convert_to_tensor(UpperCAmelCase__ ) # boxes batch size of 1 by default A__ = tf.expand_dims(UpperCAmelCase__ ,1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'input_boxes': input_boxes} ) if input_points is not None: if return_tensors == "pt": A__ = torch.from_numpy(UpperCAmelCase__ ) # point batch size of 1 by default A__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else 
input_points elif return_tensors == "tf": A__ = tf.convert_to_tensor(UpperCAmelCase__ ) # point batch size of 1 by default A__ = tf.expand_dims(UpperCAmelCase__ ,1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'input_points': input_points} ) if input_labels is not None: if return_tensors == "pt": A__ = torch.from_numpy(UpperCAmelCase__ ) # point batch size of 1 by default A__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": A__ = tf.convert_to_tensor(UpperCAmelCase__ ) # point batch size of 1 by default A__ = tf.expand_dims(UpperCAmelCase__ ,1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'input_labels': input_labels} ) return encoding_image_processor def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: A__ = max([point.shape[0] for point in input_points] ) A__ = [] for i, point in enumerate(UpperCAmelCase__ ): if point.shape[0] != expected_nb_points: A__ = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 ) A__ = np.append(input_labels[i] ,[self.point_pad_value] ) processed_input_points.append(UpperCAmelCase__ ) A__ = processed_input_points return input_points, input_labels def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Union[str, Any]: A__ , A__ = original_size A__ , A__ = self.image_processor._get_preprocess_shape(UpperCAmelCase__ ,longest_edge=UpperCAmelCase__ ) A__ = deepcopy(UpperCAmelCase__ ).astype(UpperCAmelCase__ ) if is_bounding_box: A__ = coords.reshape(-1 ,2 ,2 ) A__ = coords[..., 0] * (new_w / old_w) A__ = coords[..., 1] * (new_h / old_h) if is_bounding_box: A__ = coords.reshape(-1 ,4 ) return coords def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,) -> List[Any]: if input_points is not None: if hasattr(UpperCAmelCase__ ,'numpy' ): # Checks for TF or Torch tensor A__ = input_points.numpy().tolist() if not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) or not isinstance(input_points[0] ,UpperCAmelCase__ ): raise ValueError('Input points must be a list of list of floating points.' ) A__ = [np.array(UpperCAmelCase__ ) for input_point in input_points] else: A__ = None if input_labels is not None: if hasattr(UpperCAmelCase__ ,'numpy' ): A__ = input_labels.numpy().tolist() if not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) or not isinstance(input_labels[0] ,UpperCAmelCase__ ): raise ValueError('Input labels must be a list of list integers.' ) A__ = [np.array(UpperCAmelCase__ ) for label in input_labels] else: A__ = None if input_boxes is not None: if hasattr(UpperCAmelCase__ ,'numpy' ): A__ = input_boxes.numpy().tolist() if ( not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) or not isinstance(input_boxes[0] ,UpperCAmelCase__ ) or not isinstance(input_boxes[0][0] ,UpperCAmelCase__ ) ): raise ValueError('Input boxes must be a list of list of list of floating points.' ) A__ = [np.array(UpperCAmelCase__ ).astype(np.floataa ) for box in input_boxes] else: A__ = None return input_points, input_labels, input_boxes @property def snake_case__ ( self ) -> Union[str, Any]: A__ = self.image_processor.model_input_names return list(dict.fromkeys(UpperCAmelCase__ ) ) def snake_case__ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: return self.image_processor.post_process_masks(*UpperCAmelCase__ ,**UpperCAmelCase__ )
221
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ): pass def _A ( UpperCamelCase_ : Union[str, Any]) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. _a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ): __lowercase = pipeline( "document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = INVOICE_URL __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) __lowercase = "What is the placebo?" __lowercase = [ { "image": load_image(UpperCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 ) self.assertEqual( UpperCAmelCase__, [ [ {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, ] ] * 3, ) @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" ) __lowercase = INVOICE_URL __lowercase = "How many cats are there?" __lowercase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) # We can optionnally pass directly the words and bounding boxes __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = [] __lowercase = [] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : List[str] ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2, ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Optional[Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Union[str, Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowercase ( self : List[Any] ): pass
17
0
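A standalone sketch of the coordinate rescaling that `_normalize_coordinates` performs in the SAM processor above, assuming the image processor resizes the longest edge to `target_size` with round-half-up, as its `_get_preprocess_shape` helper does; the function below is a hypothetical re-derivation, not the library API.

import numpy as np

def normalize_coordinates(target_size, coords, original_size):
    # original_size is (height, width); points are (x, y) pairs
    old_h, old_w = original_size
    scale = target_size / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    coords = np.asarray(coords, dtype=float).copy()
    coords[..., 0] *= new_w / old_w
    coords[..., 1] *= new_h / old_h
    return coords

print(normalize_coordinates(1024, [[500.0, 375.0]], (750, 1000)))  # ~[[512., 384.]]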
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    # Releases memory by dropping references, collecting garbage, and emptying the device cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    # Checks whether `exception` is one of the known out-of-memory failures.
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    # Retries `function` with a halved batch size whenever it fails with an OOM error.
    # `function` must take `batch_size` as its first argument.
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
326
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a = 2 class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict, *, # begin keyword-only arguments UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ): __lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCAmelCase__ ) __lowercase = len(self.symbols ) def __eq__( self : List[str], UpperCAmelCase__ : Dict ): return self.indices == other.indices def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ): return len(self.symbols ) def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ): return sym in self.indices @classmethod def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = cls() d.add_from_file(UpperCAmelCase__ ) return d def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ): if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(UpperCAmelCase__ ) self.count.append(UpperCAmelCase__ ) return idx def _lowercase ( self : Any, UpperCAmelCase__ : str ): return 0 def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): try: with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd: self.add_from_file(UpperCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(UpperCAmelCase__ ) for line in lines[indices_start_line:]: try: __lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase ,__lowercase = line.rsplit(" ", 1 ) else: __lowercase = False __lowercase = int(UpperCAmelCase__ ) __lowercase = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(UpperCAmelCase__ ) ) self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _A ( UpperCamelCase_ : int) -> str: '''simple docstring''' __lowercase = dict((re.sub(r"@@$", "", UpperCamelCase_), v) if k.endswith("@@") else (re.sub(r"$", "</w>", UpperCamelCase_), v) for k, v in d.items()) __lowercase = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowercase = d[k] # restore return da def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]: '''simple docstring''' if not os.path.exists(UpperCamelCase_): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""") os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_) print(F"""Writing results to {pytorch_dump_folder_path}""") # handle various types of models __lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""") __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = chkpt["cfg"]["model"] # dicts __lowercase = os.path.join(UpperCamelCase_, "dict.txt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {dict_file} does not exist!""") __lowercase = Dictionary.load(UpperCamelCase_) __lowercase = rewrite_dict_keys(src_dict.indices) __lowercase = len(UpperCamelCase_) __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"]) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # merges_file (bpecodes) __lowercase = os.path.join(UpperCamelCase_, "bpecodes") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""") __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(UpperCamelCase_, UpperCamelCase_) # model config __lowercase = os.path.join(UpperCamelCase_, "config.json") __lowercase = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # tokenizer config __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) __lowercase = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": 
None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # model __lowercase = chkpt["model"] # remove unneeded keys __lowercase = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase_, UpperCamelCase_) __lowercase = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): __lowercase = model_state_dict.pop(UpperCamelCase_) else: __lowercase = model_state_dict.pop(UpperCamelCase_) __lowercase = BioGptConfig.from_pretrained(UpperCamelCase_) __lowercase = BioGptForCausalLM(UpperCamelCase_) # check that it loads ok model_new.load_state_dict(UpperCamelCase_) # save __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) print(F"""Generating {pytorch_weights_dump_path}""") torch.save(UpperCamelCase_, UpperCamelCase_) print("Conversion is done!") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
17
0
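A hedged usage sketch for the batch-size decorator above; `train_one_epoch` is a hypothetical function, and the decorator supplies `batch_size` as the first argument on each retry, so the caller must not pass it.

@find_executable_batch_size(starting_batch_size=64)
def train_one_epoch(batch_size, model_name):
    # hypothetical body: build dataloaders at `batch_size` and train;
    # a CUDA OOM raised here makes the decorator retry at batch_size // 2
    print(f"trying batch_size={batch_size} for {model_name}")

train_one_epoch("my-model")  # note: batch_size is injected by the decorator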
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    # Computes the Schur complement of the block matrix [[A, B], [B.T, C]],
    # i.e. C - B.T @ A^{-1} @ B, optionally using a precomputed (pseudo-)inverse of A.
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
26
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
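A quick numeric check of the identity that the Schur-complement unit test above relies on, namely det([[A, B], [B.T, C]]) == det(A) * det(C - B.T A^{-1} B); the matrices here are arbitrary small examples.

import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([[1.0], [2.0]])
c = np.array([[5.0]])
s = c - b.T @ np.linalg.inv(a) @ b      # Schur complement of A, here 40/11
x = np.block([[a, b], [b.T, c]])        # full block matrix, det(x) = 40
assert np.isclose(np.linalg.det(x), np.linalg.det(a) * np.linalg.det(s))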
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[Any] = BarthezTokenizer UpperCAmelCase : Optional[Any] = BarthezTokenizerFast UpperCAmelCase : str = True UpperCAmelCase : Optional[int] = True def lowerCAmelCase_ ( self : Union[str, Any] ): super().setUp() _A = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCAmelCase__ ) _A = tokenizer def lowerCAmelCase_ ( self : int ): _A = '<pad>' _A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCAmelCase_ ( self : str ): _A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(UpperCAmelCase__ ) , 101_122 ) def lowerCAmelCase_ ( self : Optional[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 101_122 ) @require_torch def lowerCAmelCase_ ( self : int ): _A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _A = [0, 57, 3_018, 70_307, 91, 2] _A = self.tokenizer( UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='pt' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) _A = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase_ ( self : Optional[Any] ): if not self.test_rust_tokenizer: return _A = self.get_tokenizer() _A = self.get_rust_tokenizer() _A = 'I was born in 92000, and this is falsé.' 
_A = tokenizer.tokenize(UpperCAmelCase__ ) _A = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _A = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) _A = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _A = self.get_rust_tokenizer() _A = tokenizer.encode(UpperCAmelCase__ ) _A = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def lowerCAmelCase_ ( self : Union[str, Any] ): # fmt: off _A = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _A = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=UpperCAmelCase__ , )
315
"""simple docstring""" from math import sqrt def _A ( UpperCamelCase_ : int) -> int: '''simple docstring''' __lowercase = 0 for i in range(1, int(sqrt(UpperCamelCase_) + 1)): if n % i == 0 and i != sqrt(UpperCamelCase_): total += i + n // i elif i == sqrt(UpperCamelCase_): total += i return total - n def _A ( UpperCamelCase_ : int = 10000) -> int: '''simple docstring''' __lowercase = sum( i for i in range(1, UpperCamelCase_) if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
17
0
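A worked check of the amicable-number condition that `solution()` above counts: 220 and 284 form the classic amicable pair, so both are included in the sum.

assert sum_of_divisors(220) == 284  # proper divisors of 220 sum to 284
assert sum_of_divisors(284) == 220  # and vice versa
assert sum_of_divisors(sum_of_divisors(220)) == 220 and sum_of_divisors(220) != 220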
import pickle import numpy as np from matplotlib import pyplot as plt class lowercase_ : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0.2 , __SCREAMING_SNAKE_CASE=0.2 ) ->int: lowerCAmelCase = bp_numa lowerCAmelCase = bp_numa lowerCAmelCase = bp_numa lowerCAmelCase = conva_get[:2] lowerCAmelCase = conva_get[2] lowerCAmelCase = size_pa lowerCAmelCase = rate_w lowerCAmelCase = rate_t lowerCAmelCase = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) lowerCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1 lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1 lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1 def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: # save model dict with pickle lowerCAmelCase = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(UpperCAmelCase__ , '''wb''' ) as f: pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ ) print(F"Model saved: {save_path}" ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE ) ->List[str]: # read saved model with open(UpperCAmelCase__ , '''rb''' ) as f: lowerCAmelCase = pickle.load(UpperCAmelCase__ ) # noqa: S301 lowerCAmelCase = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) lowerCAmelCase = model_dic.get('''size_pooling1''' ) lowerCAmelCase = model_dic.get('''num_bp1''' ) lowerCAmelCase = model_dic.get('''num_bp2''' ) lowerCAmelCase = model_dic.get('''num_bp3''' ) lowerCAmelCase = model_dic.get('''rate_weight''' ) lowerCAmelCase = model_dic.get('''rate_thre''' ) # create model instance lowerCAmelCase = CNN(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # modify model parameter lowerCAmelCase = model_dic.get('''w_conv1''' ) lowerCAmelCase = model_dic.get('''wkj''' ) lowerCAmelCase = model_dic.get('''vji''' ) lowerCAmelCase = model_dic.get('''thre_conv1''' ) lowerCAmelCase = model_dic.get('''thre_bp2''' ) lowerCAmelCase = model_dic.get('''thre_bp3''' ) return conv_ins def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: return 1 / (1 + np.exp(-1 * x )) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]: return round(UpperCAmelCase__ , 3 ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[Any]: # convolution process lowerCAmelCase = convs[0] lowerCAmelCase = convs[1] lowerCAmelCase = np.shape(UpperCAmelCase__ )[0] # get the data slice of original image data, data_focus lowerCAmelCase = [] for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase__ ): for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase__ ): lowerCAmelCase = data[ i_focus : i_focus + size_conv, 
j_focus : j_focus + size_conv ] data_focus.append(UpperCAmelCase__ ) # calculate the feature map of every single kernel, and saved as list of matrix lowerCAmelCase = [] lowerCAmelCase = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(UpperCAmelCase__ ): lowerCAmelCase = [] for i_focus in range(len(UpperCAmelCase__ ) ): lowerCAmelCase = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(UpperCAmelCase__ ) ) lowerCAmelCase = np.asmatrix(UpperCAmelCase__ ).reshape( UpperCAmelCase__ , UpperCAmelCase__ ) data_featuremap.append(UpperCAmelCase__ ) # expanding the data slice to One dimenssion lowerCAmelCase = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(UpperCAmelCase__ ) ) lowerCAmelCase = np.asarray(UpperCAmelCase__ ) return focus_list, data_featuremap def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="average_pool" ) ->Union[str, Any]: # pooling process lowerCAmelCase = len(featuremaps[0] ) lowerCAmelCase = int(size_map / size_pooling ) lowerCAmelCase = [] for i_map in range(len(UpperCAmelCase__ ) ): lowerCAmelCase = featuremaps[i_map] lowerCAmelCase = [] for i_focus in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ): for j_focus in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(UpperCAmelCase__ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(UpperCAmelCase__ ) ) lowerCAmelCase = np.asmatrix(UpperCAmelCase__ ).reshape(UpperCAmelCase__ , UpperCAmelCase__ ) featuremap_pooled.append(UpperCAmelCase__ ) return featuremap_pooled def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]: # expanding three dimension data to one dimension list lowerCAmelCase = [] for i in range(len(UpperCAmelCase__ ) ): lowerCAmelCase = np.shape(data[i] ) lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] ) lowerCAmelCase = data_listed.getA().tolist()[0] data_expanded.extend(UpperCAmelCase__ ) lowerCAmelCase = np.asarray(UpperCAmelCase__ ) return data_expanded def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[Any]: # expanding matrix to one dimension list lowerCAmelCase = np.asarray(UpperCAmelCase__ ) lowerCAmelCase = np.shape(UpperCAmelCase__ ) lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[Any]: lowerCAmelCase = [] lowerCAmelCase = 0 for i_map in range(UpperCAmelCase__ ): lowerCAmelCase = np.ones((size_map, size_map) ) for i in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ): for j in range(0 , UpperCAmelCase__ , UpperCAmelCase__ ): lowerCAmelCase = pd_pool[ i_pool ] lowerCAmelCase = i_pool + 1 lowerCAmelCase = np.multiply( UpperCAmelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(UpperCAmelCase__ ) return pd_all def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=bool ) ->List[str]: # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(UpperCAmelCase__ )) ) 
print((''' - - Shape: Teach_Data ''', np.shape(UpperCAmelCase__ )) ) lowerCAmelCase = 0 lowerCAmelCase = [] lowerCAmelCase = 10000 while rp < n_repeat and mse >= error_accuracy: lowerCAmelCase = 0 print(F"-------------Learning Time {rp}--------------" ) for p in range(len(UpperCAmelCase__ ) ): # print('------------Learning Image: %d--------------'%p) lowerCAmelCase = np.asmatrix(datas_train[p] ) lowerCAmelCase = np.asarray(datas_teach[p] ) lowerCAmelCase , lowerCAmelCase = self.convolute( UpperCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowerCAmelCase = self.pooling(UpperCAmelCase__ , self.size_poolinga ) lowerCAmelCase = np.shape(UpperCAmelCase__ ) lowerCAmelCase = self._expand(UpperCAmelCase__ ) lowerCAmelCase = data_bp_input lowerCAmelCase = np.dot(UpperCAmelCase__ , self.vji.T ) - self.thre_bpa lowerCAmelCase = self.sig(UpperCAmelCase__ ) lowerCAmelCase = np.dot(UpperCAmelCase__ , self.wkj.T ) - self.thre_bpa lowerCAmelCase = self.sig(UpperCAmelCase__ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowerCAmelCase = np.multiply( (data_teach - bp_outa) , np.multiply(UpperCAmelCase__ , (1 - bp_outa) ) ) lowerCAmelCase = np.multiply( np.dot(UpperCAmelCase__ , self.wkj ) , np.multiply(UpperCAmelCase__ , (1 - bp_outa) ) ) lowerCAmelCase = np.dot(UpperCAmelCase__ , self.vji ) lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga) lowerCAmelCase = pd_conva_pooled.T.getA().tolist() lowerCAmelCase = self._calculate_gradient_from_pool( UpperCAmelCase__ , UpperCAmelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] ) lowerCAmelCase = self.rate_weight * np.dot(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowerCAmelCase = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowerCAmelCase = rp + 1 lowerCAmelCase = error_count / patterns all_mse.append(UpperCAmelCase__ ) def draw_error(): lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(UpperCAmelCase__ , '''+-''' ) plt.plot(UpperCAmelCase__ , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(UpperCAmelCase__ , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F" - - Mse: {mse:.6f}") ) if draw_e: draw_error() return mse def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Union[str, Any]: # model predict lowerCAmelCase = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(UpperCAmelCase__ )) ) for p in range(len(UpperCAmelCase__ ) ): lowerCAmelCase = np.asmatrix(datas_test[p] ) lowerCAmelCase , lowerCAmelCase = self.convolute( UpperCAmelCase__ , self.conva , 
self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowerCAmelCase = self.pooling(UpperCAmelCase__ , self.size_poolinga ) lowerCAmelCase = self._expand(UpperCAmelCase__ ) lowerCAmelCase = data_bp_input lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa lowerCAmelCase = self.sig(UpperCAmelCase__ ) lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa lowerCAmelCase = self.sig(UpperCAmelCase__ ) produce_out.extend(bp_outa.getA().tolist() ) lowerCAmelCase = [list(map(self.do_round , UpperCAmelCase__ ) ) for each in produce_out] return np.asarray(UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple: # return the data of image after convoluting process so we can check it out lowerCAmelCase = np.asmatrix(UpperCAmelCase__ ) lowerCAmelCase , lowerCAmelCase = self.convolute( UpperCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) lowerCAmelCase = self.pooling(UpperCAmelCase__ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
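# A minimal smoke-test sketch for the CNN class above, under assumptions: the
# sizes below are illustrative, the class name CNN is taken from its own
# load_model classmethod, and the method names (trian, predict) follow the
# upstream TheAlgorithms source that this obfuscated copy mirrors.
import numpy as np

cnn = CNN([3, 2, 1], 2, 50, 20, 2)  # 3x3 kernels, 2 maps, stride 1; 2x2 pooling
# One 12x12 input: conv -> 10x10 per map, pool -> 5x5, 2 maps -> 50 = bp_num1.
train_images = [np.random.rand(12, 12)]
train_targets = [np.asarray([1.0, 0.0])]
cnn.trian(1, train_images, train_targets, 5, 0.05, False)  # patterns, data, teach, repeats, tolerance, draw
print(cnn.predict(train_images))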
338
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a = _symbol_database.Default() _a = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) _a = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a = None _a = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a = 45 _a = 15_81 _a = 15_17 _a = 15_70 _a = 15_84 _a = 17_93 _a = 17_95 _a = 19_16 _a = 18_64 _a = 19_05 _a = 19_19 _a = 24_29 _a = 22_08 _a = 24_18 _a = 23_23 _a = 24_07 # @@protoc_insertion_point(module_scope)
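# A hedged usage sketch for the generated module above, assuming its original
# wiring (`_sym_db`, `DESCRIPTOR`, `_globals = globals()`) in place of the
# damaged repeated `_a = ...` assignments; "tokenizer.model" is a placeholder
# path to a trained SentencePiece model file.
from sentencepiece_model_pb2 import ModelProto

proto = ModelProto()
with open("tokenizer.model", "rb") as f:
    proto.ParseFromString(f.read())

print(proto.trainer_spec.model_type)        # enum value, e.g. 1 == UNIGRAM
print(len(proto.pieces), "pieces")
for piece in proto.pieces[:5]:
    print(piece.piece, piece.score, piece.type)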
17
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : List[str] =logging.get_logger(__name__) def lowercase__ ( __lowercase : int ) -> Any: """simple docstring""" __UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): __UpperCamelCase = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): __UpperCamelCase = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __UpperCamelCase = key[key.find('patch_embed' ) + len('patch_embed' )] __UpperCamelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCamelCase_ )-1}''' ) if "norm" in key: __UpperCamelCase = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __UpperCamelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] __UpperCamelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCamelCase_ )-1}''' ) if "layer_norm1" in key: __UpperCamelCase = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: __UpperCamelCase = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 __UpperCamelCase = key[key.find('block' ) + len('block' )] __UpperCamelCase = key.replace(F'''block{idx}''' , F'''block.{int(UpperCamelCase_ )-1}''' ) if "attn.q" in key: __UpperCamelCase = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: __UpperCamelCase = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: __UpperCamelCase = key.replace('attn' , 'attention.self' ) if "fc1" in key: __UpperCamelCase = key.replace('fc1' , 'dense1' ) if "fc2" in key: __UpperCamelCase = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: __UpperCamelCase = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: __UpperCamelCase = key.replace('linear_fuse.conv' , 'linear_fuse' ) __UpperCamelCase = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __UpperCamelCase = key[key.find('linear_c' ) + len('linear_c' )] __UpperCamelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCamelCase_ )-1}''' ) if "bot_conv" in key: __UpperCamelCase = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: __UpperCamelCase = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: __UpperCamelCase = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: __UpperCamelCase = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: __UpperCamelCase = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: __UpperCamelCase = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: __UpperCamelCase = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): __UpperCamelCase = key.replace('module.last_layer_depth' , 'head.head' ) __UpperCamelCase = value return new_state_dict def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + 
bias of keys and values (which is a single matrix in the original implementation) __UpperCamelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) __UpperCamelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict __UpperCamelCase = kv_weight[ : config.hidden_sizes[i], : ] __UpperCamelCase = kv_bias[: config.hidden_sizes[i]] __UpperCamelCase = kv_weight[ config.hidden_sizes[i] :, : ] __UpperCamelCase = kv_bias[config.hidden_sizes[i] :] def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return image @torch.no_grad() def lowercase__ ( __lowercase : Any , __lowercase : int , __lowercase : int=False , __lowercase : str=None ) -> Any: """simple docstring""" __UpperCamelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) __UpperCamelCase = GLPNImageProcessor() # prepare image __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict __UpperCamelCase = torch.load(UpperCamelCase_ , map_location=torch.device('cpu' ) ) # rename keys __UpperCamelCase = rename_keys(UpperCamelCase_ ) # key and value matrices need special treatment read_in_k_v(UpperCamelCase_ , UpperCamelCase_ ) # create HuggingFace model and load state dict __UpperCamelCase = GLPNForDepthEstimation(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() # forward pass __UpperCamelCase = model(UpperCamelCase_ ) __UpperCamelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: __UpperCamelCase = torch.tensor( [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] ) elif "kitti" in model_name: __UpperCamelCase = torch.tensor( [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) __UpperCamelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' 
) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase_ , UpperCamelCase_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=UpperCamelCase_ , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase_ , UpperCamelCase_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=UpperCamelCase_ , ) if __name__ == "__main__": a__ : Tuple =argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) parser.add_argument( '''--model_name''', default='''glpn-kitti''', type=str, help='''Name of the model in case you\'re pushing to the hub.''', ) a__ : Union[str, Any] =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
53
"""simple docstring""" import baseaa def _A ( UpperCamelCase_ : str) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode("utf-8")) def _A ( UpperCamelCase_ : bytes) -> str: '''simple docstring''' return baseaa.baadecode(UpperCamelCase_).decode("utf-8") if __name__ == "__main__": _a = 'Hello World!' _a = baseaa_encode(test) print(encoded) _a = baseaa_decode(encoded) print(decoded)
17
0
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __lowerCamelCase = logging.getLogger(__name__) __lowerCamelCase = """Hello world! cécé herlolip""" __lowerCamelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def UpperCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ): snake_case : List[Any] = BertAbsConfig( temp_dir="." , finetune_bert=UpperCamelCase_ , large=UpperCamelCase_ , share_emb=UpperCamelCase_ , use_bert_emb=UpperCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , ) snake_case : Any = torch.load(UpperCamelCase_ , lambda __lowerCamelCase , __lowerCamelCase : storage ) snake_case : Optional[int] = AbsSummarizer(UpperCamelCase_ , torch.device("cpu" ) , UpperCamelCase_ ) original.eval() snake_case : Any = BertAbsSummarizer(UpperCamelCase_ , torch.device("cpu" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("Make sure that the models' outputs are identical" ) snake_case : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" ) # prepare the model inputs snake_case : List[Any] = tokenizer.encode("This is sample éàalj'-." ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase_ )) ) snake_case : Optional[Any] = torch.tensor(UpperCamelCase_ ).unsqueeze(0 ) snake_case : Any = tokenizer.encode("This is sample 3 éàalj'-." ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase_ )) ) snake_case : Optional[int] = torch.tensor(UpperCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass snake_case : List[Any] = encoder_input_ids snake_case : Any = decoder_input_ids snake_case : Dict = None snake_case : Optional[Any] = None snake_case : Optional[Any] = None snake_case : Dict = None snake_case : int = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical snake_case : List[Any] = original(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )[0] snake_case : List[Any] = original.generator(UpperCamelCase_ ) snake_case : List[str] = new_model( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )[0] snake_case : str = new_model.generator(UpperCamelCase_ ) snake_case : Optional[int] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("Maximum absolute difference beween weights: {:.2f}".format(UpperCamelCase_ ) ) snake_case : Tuple = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("Maximum absolute difference beween weights: {:.2f}".format(UpperCamelCase_ ) ) snake_case : Optional[Any] = torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) if are_identical: logging.info("all weights are equal up to 1e-3" ) else: raise ValueError("the weights are different. The new model is likely different from the original one." ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("saving the model's state dictionary" ) torch.save( new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) __lowerCamelCase = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
59
"""simple docstring""" def _A ( UpperCamelCase_ : Any) -> List[str]: '''simple docstring''' __lowercase ,__lowercase = [], [] while len(UpperCamelCase_) > 1: __lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_) start.append(UpperCamelCase_) end.append(UpperCamelCase_) collection.remove(UpperCamelCase_) collection.remove(UpperCamelCase_) end.reverse() return start + collection + end if __name__ == "__main__": _a = input('Enter numbers separated by a comma:\n').strip() _a = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
17
0
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class __lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]): SCREAMING_SNAKE_CASE_: Dict = parent def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return {} def A_ ( ): SCREAMING_SNAKE_CASE_: Union[str, Any] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" SCREAMING_SNAKE_CASE_: Optional[int] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Tuple = MarkupLMFeatureExtractor if is_bsa_available() else None def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: Optional[int] = MarkupLMFeatureExtractionTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return self.feature_extract_tester.prepare_feat_extract_dict() def _SCREAMING_SNAKE_CASE ( self : List[Any]): # Initialize feature_extractor SCREAMING_SNAKE_CASE_: Union[str, Any] = self.feature_extraction_class() # Test not batched input SCREAMING_SNAKE_CASE_: List[Any] = get_html_strings()[0] SCREAMING_SNAKE_CASE_: List[Any] = feature_extractor(UpperCAmelCase__) # fmt: off SCREAMING_SNAKE_CASE_: Optional[Any] = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] SCREAMING_SNAKE_CASE_: Tuple = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , UpperCAmelCase__) self.assertEqual(encoding.xpaths , UpperCAmelCase__) # Test batched SCREAMING_SNAKE_CASE_: Tuple = get_html_strings() SCREAMING_SNAKE_CASE_: Any = feature_extractor(UpperCAmelCase__) # fmt: off SCREAMING_SNAKE_CASE_: int = expected_nodes + [["My First Heading", "My first paragraph."]] SCREAMING_SNAKE_CASE_: Optional[Any] = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes) , 2) self.assertEqual(len(encoding.xpaths) , 2) self.assertEqual(encoding.nodes , UpperCAmelCase__) self.assertEqual(encoding.xpaths , UpperCAmelCase__)
13
"""simple docstring""" def _A ( UpperCamelCase_ : list[int]) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty") __lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average return sum(abs(x - average) for x in nums) / len(UpperCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
17
0
def or_gate(input_1: int, input_2: int) -> int:
    """Return the output of a two-input OR gate (1 if either input is 1)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
71
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ): __lowercase = parent __lowercase = vocab_size __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = (image_size // patch_size) ** 2 __lowercase = num_patches + 1 def _lowercase ( self : int ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) return config, pixel_values, labels def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ): __lowercase = FlaxBeitModel(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ): 
__lowercase = self.type_sequence_label_size __lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self : List[Any] ): __lowercase = FlaxBeitModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["pixel_values"] self.assertListEqual(arg_names[:1], UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ): self.assertEqual(jitted_output.shape, output.shape ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ ) def _A ( ) -> str: '''simple docstring''' __lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple 
docstring""" @cached_property def _lowercase ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values # prepare bool_masked_pos __lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ ) # forward pass __lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_9_6, 8_1_9_2) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) ) @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_0_0_0) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_8_1 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 2_1_8_4_1) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
17
0
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict=() , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[str]="no" , lowerCAmelCase__ : int="29500" ): """simple docstring""" __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Dict = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): __UpperCAmelCase : Dict = True elif "IPython" in sys.modules: __UpperCAmelCase : Dict = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() ) try: __UpperCAmelCase : Optional[int] = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f'Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.' ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , UpperCamelCase_ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: __UpperCAmelCase : Tuple = 8 __UpperCAmelCase : Optional[Any] = PrepareForLaunch(UpperCamelCase_ , distributed_type="""TPU""" ) print(f'Launching a training on {num_processes} TPU cores.' ) xmp.spawn(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*UpperCamelCase_ ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=UpperCamelCase_ , master_addr="""127.0.01""" , master_port=UpperCamelCase_ , mixed_precision=UpperCamelCase_ ): __UpperCAmelCase : List[Any] = PrepareForLaunch(UpperCamelCase_ , distributed_type="""MULTI_GPU""" ) print(f'Launching training on {num_processes} GPUs.' 
) try: start_processes(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """ """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): __UpperCAmelCase : Any = """1""" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*UpperCamelCase_ ) def lowercase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=() , lowerCAmelCase__ : Optional[int]=2 ): """simple docstring""" from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=UpperCamelCase_ , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ): __UpperCAmelCase : List[Any] = PrepareForLaunch(UpperCamelCase_ , debug=UpperCamelCase_ ) start_processes(UpperCamelCase_ , args=UpperCamelCase_ , nprocs=UpperCamelCase_ , start_method="""fork""" )
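# A minimal sketch of driving the launcher above from a notebook cell. It
# assumes the first function keeps its upstream accelerate name,
# notebook_launcher; the training function and its argument are placeholders.
from accelerate import Accelerator


def training_loop(learning_rate):
    # The Accelerator must be created *inside* the launched function, as the
    # guard clauses above enforce.
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index}: lr={learning_rate}")


notebook_launcher(training_loop, args=(1e-4,), num_processes=2, mixed_precision="fp16")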
254
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase ,lowercase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase = load_tool("text-classification" ) self.tool.setup() __lowercase = load_tool("text-classification", remote=UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : str ): __lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : List[str] ): __lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : Tuple ): __lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" )
17
0
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=64 , A=5 , A=4 , A=64 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Tuple: UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : Union[str, Any] = batch_size UpperCAmelCase : Dict = seq_length UpperCAmelCase : Optional[int] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : Union[str, Any] = use_token_type_ids UpperCAmelCase : Optional[Any] = use_labels UpperCAmelCase : List[str] = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : Optional[Any] = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Tuple = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : Optional[int] = max_position_embeddings UpperCAmelCase : List[Any] = type_vocab_size UpperCAmelCase : Optional[int] = type_sequence_label_size UpperCAmelCase : Any = initializer_range UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : List[Any] = num_choices UpperCAmelCase : List[str] = scope def _lowercase( self ) -> List[str]: return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : Optional[Any] = None UpperCAmelCase : List[Any] = None UpperCAmelCase : int = None if self.use_labels: UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase( self ) -> List[str]: return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _lowercase( self , A , A , A , A , A , A ) -> Optional[Any]: UpperCAmelCase : Any = MPNetModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase : Any = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self , A , A , A , A , A , A ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = MPNetForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCAmelCase : int = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A , A , A ) -> int: UpperCAmelCase : Optional[int] = self.num_labels UpperCAmelCase : List[Any] = MPNetForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCAmelCase : List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A , A , A ) -> List[str]: UpperCAmelCase : str = self.num_choices UpperCAmelCase : int = MPNetForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase : str = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase( self , A , A , A , A , A , A ) -> str: UpperCAmelCase : Dict = self.num_labels UpperCAmelCase : Optional[int] = MPNetForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() UpperCAmelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Any = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : List[Any] = config_and_inputs UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowercase = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = True def _lowercase( self ) -> Any: UpperCAmelCase : int = MPNetModelTester(self ) UpperCAmelCase : Dict = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase__ ) def _lowercase( self ) -> 
Union[str, Any]: UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase__ ) def _lowercase( self ) -> Any: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase__ ) def _lowercase( self ) -> Dict: UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase__ ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase__ ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : List[Any] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) UpperCAmelCase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCAmelCase : int = model(UpperCAmelCase__ )[0] UpperCAmelCase : Any = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) UpperCAmelCase : Any = torch.tensor( [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
265
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _a = 'CompVis/stable-diffusion-v1-1' _a = 'CompVis/stable-diffusion-v1-2' _a = 'CompVis/stable-diffusion-v1-3' _a = 'CompVis/stable-diffusion-v1-4' class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ): super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def _lowercase ( self : List[str] ): return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )} def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): self.enable_attention_slicing(UpperCAmelCase__ ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, 
UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, 
UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): __lowercase = "cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
17
0
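The comparison pipeline above fans a single prompt out to four Stable Diffusion checkpoints, but only after validating the requested resolution. A minimal, self-contained sketch of that validation step (the function name here is hypothetical, not part of the pipeline):

def validate_latent_size(height: int, width: int) -> None:
    # Stable Diffusion's VAE downsamples by a factor of 8, so both
    # spatial dimensions must be multiples of 8.
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

validate_latent_size(512, 512)   # ok
# validate_latent_size(500, 512) would raise ValueError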
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__( __A , unittest.TestCase ): lowerCAmelCase__ : int = KandinskyVaaImgaImgPipeline lowerCAmelCase__ : Optional[Any] = ["image_embeds", "negative_image_embeds", "image"] lowerCAmelCase__ : int = [ "image_embeds", "negative_image_embeds", "image", ] lowerCAmelCase__ : List[str] = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowerCAmelCase__ : List[Any] = False @property def snake_case__ ( self ) -> Dict: return 32 @property def snake_case__ ( self ) -> str: return 32 @property def snake_case__ ( self ) -> Optional[Any]: return self.time_input_dim @property def snake_case__ ( self ) -> Optional[int]: return self.time_input_dim * 4 @property def snake_case__ ( self ) -> Optional[int]: return 1_00 @property def snake_case__ ( self ) -> Optional[int]: torch.manual_seed(0 ) A__ = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } A__ = UNetaDConditionModel(**UpperCAmelCase__ ) return model @property def snake_case__ ( self ) -> Dict: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def snake_case__ ( self ) -> Dict: torch.manual_seed(0 ) A__ = VQModel(**self.dummy_movq_kwargs ) return model def snake_case__ ( self ) -> int: A__ = self.dummy_unet A__ = self.dummy_movq A__ = { 'num_train_timesteps': 10_00, 'beta_schedule': 'linear', 'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } A__ = DDIMScheduler(**UpperCAmelCase__ ) A__ = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> Any: A__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) A__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( UpperCAmelCase__ ) # create init_image A__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(UpperCAmelCase__ 
) ).to(UpperCAmelCase__ ) A__ = image.cpu().permute(0 ,2 ,3 ,1 )[0] A__ = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('RGB' ).resize((2_56, 2_56) ) if str(UpperCAmelCase__ ).startswith('mps' ): A__ = torch.manual_seed(UpperCAmelCase__ ) else: A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) A__ = { 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def snake_case__ ( self ) -> str: A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**UpperCAmelCase__ ) A__ = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) A__ = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) ) A__ = output.images A__ = pipe( **self.get_dummy_inputs(UpperCAmelCase__ ) ,return_dict=UpperCAmelCase__ ,)[0] A__ = image[0, -3:, -3:, -1] A__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A__ = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self ) -> Optional[Any]: A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_img2img_frog.npy' ) A__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) A__ = 'A red cartoon frog, 4k' A__ = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase__ ) A__ = KandinskyVaaImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa ) A__ = pipeline.to(UpperCAmelCase__ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase__ ) A__ = torch.Generator(device='cpu' ).manual_seed(0 ) A__ , A__ = pipe_prior( UpperCAmelCase__ ,generator=UpperCAmelCase__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() A__ = pipeline( image=UpperCAmelCase__ ,image_embeds=UpperCAmelCase__ ,negative_image_embeds=UpperCAmelCase__ ,generator=UpperCAmelCase__ ,num_inference_steps=1_00 ,height=7_68 ,width=7_68 ,strength=0.2 ,output_type='np' ,) A__ = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(UpperCAmelCase__ ,UpperCAmelCase__ )
221
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ): __lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) ) __lowercase = torch.manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowercase ( self : Optional[Any] ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : int ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : str ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) 
__lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Dict ): __lowercase = ort.SessionOptions() __lowercase = False return options def _lowercase ( self : Dict ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowercase ( self : str ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) __lowercase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] 
) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
17
0
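The fast tests above all follow the same pattern: run the pipeline on dummy components, then compare a fixed 3x3 corner of the output against a recorded reference slice. A minimal sketch of that comparison, assuming an NHWC float image array (the helper name is an illustration, not an API from the test suite):

import numpy as np

def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    # Take the bottom-right 3x3 patch of the last channel of the first image,
    # exactly as the tests above slice with image[0, -3:, -3:, -1].
    image_slice = image[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(image_slice - expected_slice).max()
    assert max_diff < atol, f"expected_slice {expected_slice}, but got {image_slice}"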
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCAmelCase__( lowercase : List[Any] , lowercase : Tuple=False ) -> int: try: __snake_case : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __snake_case : Any = default else: # KEY is set, convert it to True or False. try: __snake_case : List[Any] = strtobool(UpperCamelCase_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value _UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) def lowerCAmelCase__( lowercase : int ) -> Optional[int]: return unittest.skip("Test was skipped" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Any ) -> Optional[int]: return unittest.skipUnless(_run_slow_tests , "test is slow" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Tuple ) -> List[Any]: return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : int ) -> Any: return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[int] ) -> Tuple: return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[int] ) -> Dict: return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : int ) -> List[Any]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple: return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> int: return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : str ) -> List[Any]: return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Union[str, Any] ) -> List[str]: return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : List[Any] ) -> str: return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[int] ) -> str: return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : str ) -> int: return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Dict ) -> str: return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : 
List[str] ) -> Dict: return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Dict=None , lowercase : int=None ) -> int: if test_case is None: return partial(UpperCamelCase_ , version=UpperCamelCase_ ) return unittest.skipUnless(is_torch_version(">=" , UpperCamelCase_ ) , f"""test requires torch version >= {version}""" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : List[Any] ) -> Any: return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[Any] ) -> Union[str, Any]: return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(UpperCamelCase_ ) def lowerCAmelCase__( lowercase : Optional[int] ) -> Tuple: return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(UpperCamelCase_ ) _UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCAmelCase__( lowercase : List[str] ) -> str: return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(UpperCamelCase_ ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" UpperCAmelCase_ : Tuple =True @classmethod def UpperCAmelCase ( cls ) -> str: '''simple docstring''' __snake_case : str = tempfile.mkdtemp() @classmethod def UpperCAmelCase ( cls ) -> int: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(UpperCAmelCase__ ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> str: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]: '''simple docstring''' __snake_case : Any = mocks if isinstance(UpperCAmelCase__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCAmelCase__( lowercase : int ) -> int: __snake_case : Tuple = AcceleratorState() __snake_case : Optional[Any] = tensor[None].clone().to(state.device ) __snake_case : Dict = gather(UpperCamelCase_ ).cpu() __snake_case : Optional[int] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , UpperCamelCase_ ): return False return True class _lowerCamelCase : """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : Optional[int] = returncode __snake_case : Optional[int] = stdout __snake_case : List[Any] = stderr async def lowerCAmelCase__( lowercase : int , lowercase : List[str] ) -> Any: while True: __snake_case : Optional[int] = await stream.readline() if line: callback(UpperCamelCase_ ) else: break async def lowerCAmelCase__( lowercase : Optional[int] , lowercase : List[str]=None , lowercase : Optional[Any]=None , lowercase : List[str]=None , lowercase : Dict=False , lowercase : int=False ) -> _RunOutput: if echo: print("\nRunning: " , " ".join(UpperCamelCase_ ) ) __snake_case : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=UpperCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCamelCase_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __snake_case : Any = [] __snake_case : int = [] def tee(lowercase : Any , lowercase : List[Any] , lowercase : Dict , lowercase : int="" ): __snake_case : str = line.decode("utf-8" ).rstrip() sink.append(UpperCamelCase_ ) if not quiet: print(UpperCamelCase_ , UpperCamelCase_ , file=UpperCamelCase_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda lowercase : tee(UpperCamelCase_ , UpperCamelCase_ , sys.stdout , label="stdout:" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda lowercase : tee(UpperCamelCase_ , UpperCamelCase_ , sys.stderr , label="stderr:" ) ) ), ] , timeout=UpperCamelCase_ , ) return _RunOutput(await p.wait() , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__( lowercase : str , lowercase : Optional[Any]=None , lowercase : Dict=None , lowercase : str=180 , lowercase : Optional[int]=False , lowercase : int=True ) -> _RunOutput: __snake_case : Optional[int] = asyncio.get_event_loop() __snake_case : int = loop.run_until_complete( _stream_subprocess(UpperCamelCase_ , env=UpperCamelCase_ , stdin=UpperCamelCase_ , timeout=UpperCamelCase_ , quiet=UpperCamelCase_ , echo=UpperCamelCase_ ) ) __snake_case : int = " ".join(UpperCamelCase_ ) if result.returncode > 0: __snake_case : List[Any] = "\n".join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) return result class _lowerCamelCase ( a ): """simple docstring""" pass def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[int]=False ) -> Union[str, Any]: try: __snake_case : Any = subprocess.check_output(UpperCamelCase_ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(UpperCamelCase_ , "decode" ): __snake_case : Optional[Any] = output.decode("utf-8" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"""Command `{" ".join(UpperCamelCase_ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
326
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _a = datasets.utils.logging.get_logger(__name__) _a = ['names', 'prefix'] _a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] _a = ['encoding_errors', 'on_bad_lines'] _a = ['date_format'] @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : str = "," __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer" __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None __UpperCAmelCase : Optional[Union[List[int], List[str]]] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None __UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[Union[int, List[int]]] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[Union[str, List[str]]] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = "." __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = '"' __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : int = 0 __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : int = 1_0_0_0_0 __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : Optional[str] = "strict" __UpperCAmelCase : Literal["error", "warn", "skip"] = "error" __UpperCAmelCase : Optional[str] = None def _lowercase ( self : Tuple ): if self.delimiter is not None: __lowercase = self.delimiter if self.column_names is not None: __lowercase = self.column_names @property def _lowercase ( self : Union[str, Any] ): __lowercase = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, 
"memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _lowerCAmelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCAmelCase : Tuple = CsvConfig def _lowercase ( self : List[str] ): return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __lowercase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__, (str, list, tuple) ): __lowercase = data_files if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )] __lowercase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) ) return splits def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ): if self.config.features is not None: __lowercase = self.config.features.arrow_schema if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast __lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ ) return pa_table def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ): __lowercase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __lowercase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): __lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(UpperCAmelCase__ ): __lowercase = pa.Table.from_pandas(UpperCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # 
logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" ) raise
17
0
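The testing utilities above gate slow tests behind an environment flag parsed with `strtobool`. A self-contained sketch mirroring the logic of `parse_flag_from_env` as defined above (note `strtobool` returns an int, hence the `bool()` wrapper):

import os
from distutils.util import strtobool

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, fall back to the default.
        return default
    try:
        # strtobool accepts yes/no, true/false, 1/0.
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")

run_slow = parse_flag_from_env("RUN_SLOW", default=False)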
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowercase ( unittest.TestCase ): def a__ ( self ) -> Union[str, Any]: _A : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _A : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ ) _A : Optional[Any] = -1 _A : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ ) _A : Dict = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ ) _A : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _A : Optional[int] = TextStreamer(UpperCAmelCase__ ) model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _A : Optional[Any] = cs.out[:-1] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def a__ ( self ) -> Tuple: _A : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _A : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ ) _A : Optional[int] = -1 _A : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ ) _A : Tuple = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ ) _A : Union[str, Any] = tokenizer.decode(greedy_ids[0] ) _A : Tuple = TextIteratorStreamer(UpperCAmelCase__ ) _A : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _A : int = Thread(target=model.generate , kwargs=UpperCAmelCase__ ) thread.start() _A : Tuple = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def a__ ( self ) -> Optional[Any]: _A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _A : Tuple = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ ) _A : Union[str, Any] = -1 _A : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ ) _A : int = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ ) _A : List[str] = greedy_ids[:, input_ids.shape[1] :] _A : str = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _A : Union[str, Any] = TextStreamer(UpperCAmelCase__ , skip_prompt=UpperCAmelCase__ ) model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _A : Tuple = cs.out[:-1] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def a__ ( self ) -> List[str]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _A : Optional[Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) _A : List[str] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(UpperCAmelCase__ ) _A : Optional[Any] = -1 _A : Dict = torch.ones((1, 5) , device=UpperCAmelCase__ ).long() * model.config.bos_token_id with CaptureStdout() as cs: _A : Tuple = TextStreamer(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) model.generate(UpperCAmelCase__ , max_new_tokens=1 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _A : Tuple = cs.out[:-1] # Remove the final "\n" _A : int = tokenizer(UpperCAmelCase__ , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def a__ ( self ) -> List[str]: _A : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) _A : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ ) _A : Any = -1 _A : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ ) _A : Any = TextIteratorStreamer(UpperCAmelCase__ , timeout=0.001 ) _A : str = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} _A : Union[str, Any] = Thread(target=model.generate , kwargs=UpperCAmelCase__ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCAmelCase__ ): _A : int = """""" for new_text in streamer: streamer_text += new_text
26
"""simple docstring""" from scipy.stats import spearmanr import datasets _a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' _a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' _a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=False ): __lowercase = spearmanr(UpperCAmelCase__, UpperCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
17
0
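The iterator-streamer test above relies on `generate()` being a blocking call: it is pushed onto a worker thread while the main thread drains decoded text from the streamer. The same pattern outside a test harness (model and tokenizer names copied from the tests above):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

text = ""
for new_text in streamer:  # yields decoded chunks as they are generated
    text += new_text
thread.join()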
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class lowercase_ ( pl.LightningModule ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : str ): super().__init__() _A = model _A = 2 _A = nn.Linear(self.model.config.hidden_size , self.num_labels ) def lowerCAmelCase_ ( self : Optional[int] ): pass def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : str ) -> str: '''simple docstring''' _A = LongformerModel.from_pretrained(UpperCamelCase_ ) _A = LightningModel(UpperCamelCase_ ) _A = torch.load(UpperCamelCase_ , map_location=torch.device('cpu' ) ) lightning_model.load_state_dict(ckpt['state_dict'] ) # init longformer question answering model _A = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCamelCase_ ) print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
315
"""simple docstring""" from collections.abc import Sequence def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_)) def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' __lowercase = 0.0 for coeff in reversed(UpperCamelCase_): __lowercase = result * x + coeff return result if __name__ == "__main__": _a = (0.0, 0.0, 5.0, 9.3, 7.0) _a = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
17
0
from sklearn.metrics import mean_squared_error import datasets lowercase__ : int = '''\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n''' lowercase__ : Optional[int] = '''\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n''' lowercase__ : Union[str, Any] = '''\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="uniform_average" , __SCREAMING_SNAKE_CASE=True ) ->Any: lowerCAmelCase = mean_squared_error( UpperCAmelCase__ , UpperCAmelCase__ , sample_weight=UpperCAmelCase__ , multioutput=UpperCAmelCase__ , squared=UpperCAmelCase__ ) return {"mse": mse}
338
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowerCAmelCase ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[Any], UpperCAmelCase__ : str ): super().__init__() __lowercase = model __lowercase = 2 __lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels ) def _lowercase ( self : Optional[int] ): pass def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = LongformerModel.from_pretrained(UpperCamelCase_) __lowercase = LightningModel(UpperCamelCase_) __lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu")) lightning_model.load_state_dict(ckpt["state_dict"]) # init longformer question answering model __lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCamelCase_) print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
17
0
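The metric above is a thin wrapper over scikit-learn, and its docstring pins down the two interesting knobs: `squared=False` turns MSE into RMSE, and `multioutput="raw_values"` keeps per-output errors. Reproducing the docstring's first example directly with scikit-learn:

from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]

mse = mean_squared_error(references, predictions)                  # 0.375
rmse = mean_squared_error(references, predictions, squared=False)  # ~0.6123724356957945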
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ : Union[str, Any] ={'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] =['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str =[ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys a__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
53
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, ) assert hasattr(self, "env" ) def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ): # configuration for running training on smdistributed Model Parallel __lowercase = { "enabled": True, "processes_per_host": 8, } __lowercase = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } __lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} __lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 5_0_0, }, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", ) def _lowercase ( self : Tuple, UpperCAmelCase__ : int ): TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ): # create estimator __lowercase = self.create_estimator(UpperCAmelCase__ ) # run training estimator.fit() # result dataframe __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests 
result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
17
0
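The PLBart `__init__` above uses the transformers lazy-import pattern: public names are declared in `_import_structure`, imported eagerly only under `TYPE_CHECKING` (so type checkers can resolve them), and at runtime the module object is swapped for a `_LazyModule` proxy that performs the real imports on first attribute access. A stripped-down sketch of the same shape, assuming `_LazyModule` is importable from `transformers.utils` (module and class names here are placeholders, and the relative import only resolves inside a package):

from typing import TYPE_CHECKING
from transformers.utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}

if TYPE_CHECKING:
    from .configuration_foo import FooConfig  # resolved statically for type checkers
else:
    import sys
    # Replace this module with a proxy that imports submodules lazily.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)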
from math import sqrt def sum_of_divisors ( __lowerCamelCase : int ): snake_case : List[Any] = 0 for i in range(1 , int(sqrt(__lowerCamelCase ) + 1 ) ): if __lowerCamelCase % i == 0 and i != sqrt(__lowerCamelCase ): snake_case += i + __lowerCamelCase // i elif i == sqrt(__lowerCamelCase ): snake_case += i return snake_case - __lowerCamelCase def solution ( __lowerCamelCase : int = 10000 ): snake_case : List[str] = sum( i for i in range(1 , __lowerCamelCase ) if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i ) return snake_case if __name__ == "__main__": print(solution(int(str(input()).strip())))
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : str = { """configuration_upernet""": ["""UperNetConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Union[str, Any] = [ """UperNetForSemanticSegmentation""", """UperNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
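# Usage note: because of the _LazyModule indirection above, importing this
# package is cheap and torch-free; the modeling symbols are only materialized
# on first attribute access. A sketch (module path assumed from the upstream
# transformers layout):
#
#   from transformers.models.upernet import UperNetConfig                   # config only
#   from transformers.models.upernet import UperNetForSemanticSegmentation  # triggers torch import
#
# If torch is unavailable, only the second access fails, mirroring the
# OptionalDependencyNotAvailable handling above.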
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( 
feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, 
keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) 
and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
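# Rough usage sketch for the community pipeline above (a CLIP-guided image
# mixing pipeline). Everything below is illustrative: the checkpoint id, the
# custom pipeline reference and the image variables are assumptions, not
# values defined in this file.
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "some/stable-diffusion-checkpoint",       # hypothetical id
#       custom_pipeline="path/to/this_file",      # hypothetical reference
#   )
#   out = pipe(content_image, style_image)        # prompts default to None
#   out.images[0].save("mixed.png")
#
# With no prompts given, the CoCa model captions both images, the two caption
# embeddings are slerp-ed together, and during denoising the gradient-based
# guidance step above pulls each latent toward the slerp-ed CLIP image target.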
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : int =3 __UpperCamelCase : str =250 __UpperCamelCase : Dict =ids_tensor((batch_size, length) , UpperCAmelCase__ ) __UpperCamelCase : Tuple =torch.ones((batch_size, length) , device=UpperCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def __lowercase ( self ): """simple docstring""" __UpperCamelCase , __UpperCamelCase : Optional[Any] =self._get_tensors(5 ) __UpperCamelCase : List[Any] =StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : List[str] =self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : Dict =self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =MaxLengthCriteria(max_length=10 ) __UpperCamelCase , __UpperCamelCase : str =self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : Any =self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : Optional[int] =self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCamelCase , __UpperCamelCase : Union[str, Any] =self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : Tuple =self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase , __UpperCamelCase : List[str] =self._get_tensors(10 ) self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase : Any =StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase , __UpperCamelCase : Union[str, Any] =self._get_tensors(5 ) __UpperCamelCase : Tuple =MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCamelCase : Optional[Any] =MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) ) def __lowercase ( self ): """simple docstring""" validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(UpperCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCamelCase : List[Any] =validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(UpperCAmelCase__ ) , 1 )
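# Minimal usage sketch of the criteria exercised above (model and input_ids
# are assumed to exist; only the criteria wiring is shown):
#
#   from transformers.generation import MaxLengthCriteria, StoppingCriteriaList
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   output_ids = model.greedy_search(input_ids, stopping_criteria=criteria)
#
# Each criterion is a callable over (input_ids, scores) returning True once
# generation should stop, which is exactly the contract these tests verify.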
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : Tuple = XGLMConfig __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Union[str, Any] = "gelu" def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = d_model __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = ffn_dim __lowercase = activation_function __lowercase = activation_dropout __lowercase = attention_dropout __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = None __lowercase = 0 __lowercase = 2 __lowercase = 1 def _lowercase ( self : Union[str, Any] ): return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Tuple ): __lowercase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = self.get_config() __lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self : List[Any] ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if 
is_tf_available() else () __UpperCAmelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def _lowercase ( self : Optional[Any] ): __lowercase = TFXGLMModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @slow def _lowercase ( self : List[str] ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self : int ): super().test_resize_token_embeddings() @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[Any] ): __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __lowercase = tokenizer("Today is a nice day and", return_tensors="tf" ) __lowercase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] ) __lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = "left" # use different length sentences to test batching __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ ) __lowercase = inputs["input_ids"] __lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a translation dataset with the `datasets` package and write one
    .source/.target file pair per split."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
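# Example invocation via fire (flag names mirror the function signature; the
# script filename is whatever this file is saved as):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# This writes {train,val,test}.source and {train,val,test}.target files under
# the save directory, with the "validation" split renamed to "val".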
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a : List[str] = logging.get_logger(__name__) a : Dict = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class UpperCamelCase_ ( __magic_name__ ): lowercase = "glpn" def __init__( self , A=3 , A=4 , A=[2, 2, 2, 2] , A=[8, 4, 2, 1] , A=[32, 64, 160, 256] , A=[7, 3, 3, 3] , A=[4, 2, 2, 2] , A=[1, 2, 5, 8] , A=[4, 4, 4, 4] , A="gelu" , A=0.0 , A=0.0 , A=0.0_2 , A=0.1 , A=1e-6 , A=64 , A=10 , A=-1 , **A , ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) UpperCAmelCase : Dict = num_channels UpperCAmelCase : Optional[Any] = num_encoder_blocks UpperCAmelCase : List[Any] = depths UpperCAmelCase : List[str] = sr_ratios UpperCAmelCase : Any = hidden_sizes UpperCAmelCase : List[Any] = patch_sizes UpperCAmelCase : Dict = strides UpperCAmelCase : Tuple = mlp_ratios UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Optional[Any] = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = initializer_range UpperCAmelCase : Optional[int] = drop_path_rate UpperCAmelCase : str = layer_norm_eps UpperCAmelCase : Union[str, Any] = decoder_hidden_size UpperCAmelCase : Any = max_depth UpperCAmelCase : str = head_in_index
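# Minimal instantiation sketch for the configuration above (the public name
# GLPNConfig comes from the upstream library export; values are illustrative):
#
#   from transformers import GLPNConfig
#
#   config = GLPNConfig(num_channels=3, max_depth=10)
#   assert config.model_type == "glpn"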
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : int = "time_series_transformer" __UpperCAmelCase : Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ): # time series specific configuration __lowercase = prediction_length __lowercase = context_length or prediction_length __lowercase = distribution_output __lowercase = loss __lowercase = input_size __lowercase = num_time_features __lowercase = lags_sequence __lowercase = scaling __lowercase = num_dynamic_real_features __lowercase = num_static_real_features __lowercase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __lowercase = cardinality else: __lowercase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __lowercase = embedding_dimension else: __lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality] __lowercase = num_parallel_samples # Transformer architecture configuration __lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features __lowercase = d_model __lowercase = encoder_attention_heads __lowercase = decoder_attention_heads __lowercase = encoder_ffn_dim __lowercase = decoder_ffn_dim __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = activation_function __lowercase = init_std __lowercase = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ ) @property def _lowercase ( self : Optional[Any] ): 
return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
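# Worked example of the encoder input width implied by the code above, using
# the defaults (input_size=1, lags_sequence of length 7, no time, static or
# dynamic features, and no categorical embeddings):
#
#   feature_size = input_size * len(lags_sequence) + _number_of_features
#                = 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2)
#                = 9
#
# The input_size * 2 term accounts for the log1p(abs(loc)) and log(scale)
# scaling statistics appended to each input, as noted in the property's comment.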
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise ValueError('iterations must be defined as integers' ) if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or not number >= 1: raise ValueError( 'starting number must be\n and integer and be more than 0' ) if not iterations >= 1: raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' ) A__ = '' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCamelCase_ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ): pass def _A ( UpperCamelCase_ : Union[str, Any]) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. _a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ): __lowercase = pipeline( "document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = INVOICE_URL __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) __lowercase = "What is the placebo?" __lowercase = [ { "image": load_image(UpperCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 ) self.assertEqual( UpperCAmelCase__, [ [ {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, ] ] * 3, ) @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" ) __lowercase = INVOICE_URL __lowercase = "How many cats are there?" __lowercase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) # We can optionnally pass directly the words and bounding boxes __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = [] __lowercase = [] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : List[str] ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2, ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Optional[Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Union[str, Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowercase ( self : List[Any] ): pass
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib _UpperCamelCase = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } _UpperCamelCase = logging.WARNING def lowerCAmelCase__( ) -> int: __snake_case : Dict = os.getenv("DATASETS_VERBOSITY" , UpperCamelCase_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def lowerCAmelCase__( ) -> str: return __name__.split("." )[0] def lowerCAmelCase__( ) -> logging.Logger: return logging.getLogger(_get_library_name() ) def lowerCAmelCase__( ) -> None: __snake_case : int = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowerCAmelCase__( ) -> None: __snake_case : str = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowerCAmelCase__( lowercase : Optional[str] = None ) -> logging.Logger: if name is None: __snake_case : Any = _get_library_name() return logging.getLogger(UpperCamelCase_ ) def lowerCAmelCase__( ) -> int: return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase__( lowercase : int ) -> None: _get_library_root_logger().setLevel(UpperCamelCase_ ) def lowerCAmelCase__( ) -> Any: return set_verbosity(UpperCamelCase_ ) def lowerCAmelCase__( ) -> Optional[int]: return set_verbosity(UpperCamelCase_ ) def lowerCAmelCase__( ) -> Optional[Any]: return set_verbosity(UpperCamelCase_ ) def lowerCAmelCase__( ) -> str: return set_verbosity(UpperCamelCase_ ) def lowerCAmelCase__( ) -> None: __snake_case : List[str] = False def lowerCAmelCase__( ) -> None: __snake_case : Dict = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class _lowerCamelCase : """simple docstring""" def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]: # pylint: disable=unused-argument '''simple docstring''' __snake_case : List[Any] = args[0] if args else None def __iter__( self ) -> int: '''simple docstring''' return iter(self._iterator ) def __getattr__( self , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' def empty_fn(*UpperCAmelCase , **UpperCAmelCase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> Tuple: '''simple docstring''' return self def __exit__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: '''simple docstring''' return _UpperCamelCase = True class _lowerCamelCase : """simple docstring""" def __call__( self , *UpperCAmelCase , UpperCAmelCase=False , **UpperCAmelCase ) -> int: '''simple docstring''' if _tqdm_active and not disable: return tqdm_lib.tqdm(*UpperCAmelCase__ , **UpperCAmelCase__ ) else: return EmptyTqdm(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : Any = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() _UpperCamelCase = _tqdm_cls() def lowerCAmelCase__( ) -> bool: global _tqdm_active 
return bool(_tqdm_active ) def lowerCAmelCase__( ) -> Optional[int]: global _tqdm_active __snake_case : List[str] = True def lowerCAmelCase__( ) -> List[Any]: global _tqdm_active __snake_case : str = False
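# Usage sketch (illustration only; assumes this module is the `datasets`
# logging helper, as the DATASETS_VERBOSITY variable above suggests):
#
#     from datasets.utils import logging
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("dataset loaded")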
326
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a = 2 class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict, *, # begin keyword-only arguments UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ): __lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCAmelCase__ ) __lowercase = len(self.symbols ) def __eq__( self : List[str], UpperCAmelCase__ : Dict ): return self.indices == other.indices def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ): return len(self.symbols ) def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ): return sym in self.indices @classmethod def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = cls() d.add_from_file(UpperCAmelCase__ ) return d def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ): if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(UpperCAmelCase__ ) self.count.append(UpperCAmelCase__ ) return idx def _lowercase ( self : Any, UpperCAmelCase__ : str ): return 0 def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): try: with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd: self.add_from_file(UpperCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(UpperCAmelCase__ ) for line in lines[indices_start_line:]: try: __lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase ,__lowercase = line.rsplit(" ", 1 ) else: __lowercase = False __lowercase = int(UpperCAmelCase__ ) __lowercase = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(UpperCAmelCase__ ) ) self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _A ( UpperCamelCase_ : int) -> str: '''simple docstring''' __lowercase = dict((re.sub(r"@@$", "", UpperCamelCase_), v) if k.endswith("@@") else (re.sub(r"$", "</w>", UpperCamelCase_), v) for k, v in d.items()) __lowercase = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowercase = d[k] # restore return da def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]: '''simple docstring''' if not os.path.exists(UpperCamelCase_): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""") os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_) print(F"""Writing results to {pytorch_dump_folder_path}""") # handle various types of models __lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""") __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = chkpt["cfg"]["model"] # dicts __lowercase = os.path.join(UpperCamelCase_, "dict.txt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {dict_file} does not exist!""") __lowercase = Dictionary.load(UpperCamelCase_) __lowercase = rewrite_dict_keys(src_dict.indices) __lowercase = len(UpperCamelCase_) __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"]) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # merges_file (bpecodes) __lowercase = os.path.join(UpperCamelCase_, "bpecodes") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""") __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(UpperCamelCase_, UpperCamelCase_) # model config __lowercase = os.path.join(UpperCamelCase_, "config.json") __lowercase = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # tokenizer config __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) __lowercase = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": 
None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # model __lowercase = chkpt["model"] # remove unneeded keys __lowercase = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase_, UpperCamelCase_) __lowercase = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): __lowercase = model_state_dict.pop(UpperCamelCase_) else: __lowercase = model_state_dict.pop(UpperCamelCase_) __lowercase = BioGptConfig.from_pretrained(UpperCamelCase_) __lowercase = BioGptForCausalLM(UpperCamelCase_) # check that it loads ok model_new.load_state_dict(UpperCamelCase_) # save __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) print(F"""Generating {pytorch_weights_dump_path}""") torch.save(UpperCamelCase_, UpperCamelCase_) print("Conversion is done!") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
17
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: _snake_case = None _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _snake_case = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } _snake_case = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } _snake_case = "▁" class lowercase ( UpperCamelCase__ ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = BigBirdTokenizer _a = ["input_ids", "attention_mask"] _a = [] def __init__( self , _a=None , _a=None , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , **_a , ) -> List[str]: _A : Tuple = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token _A : List[str] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token _A : Tuple = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token _A : Union[str, Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token _A : int = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token _A : Optional[Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _A : str = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) _A : Tuple = vocab_file _A : Optional[Any] = False if not self.vocab_file else True def a__ ( self , _a , _a = None ) -> Any: _A : List[str] = [self.sep_token_id] _A : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self , _a , _a = None , _a = False ) -> str: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase__ )) + [1] return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1] def a__ ( self , _a , _a = None ) -> Any: _A : Tuple = [self.sep_token_id] _A : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self , _a , _a = None ) -> Optional[Any]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(UpperCAmelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A : int = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ): copyfile(self.vocab_file , UpperCAmelCase__ ) return (out_vocab_file,)
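# Usage sketch (illustration only; fetches the pretrained tokenizer files
# from the Hugging Face Hub on first use):
#
#     from transformers import BigBirdTokenizerFast
#     tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     tokenizer("Hello world")["input_ids"]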
26
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def _snake_case ( ) -> tuple[list[int], int]: '''simple docstring''' _A = [randint(-10_00 , 10_00 ) for i in range(10 )] _A = randint(-50_00 , 50_00 ) return (arr, r) a = make_dataset() def _snake_case ( _snake_case : list[int] , _snake_case : int ) -> tuple[int, ...]: '''simple docstring''' for triplet in permutations(UpperCamelCase_ , 3 ): if sum(UpperCamelCase_ ) == target: return tuple(sorted(UpperCamelCase_ ) ) return (0, 0, 0) def _snake_case ( _snake_case : list[int] , _snake_case : int ) -> tuple[int, int, int]: '''simple docstring''' arr.sort() _A = len(UpperCamelCase_ ) for i in range(n - 1 ): _A , _A = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def _snake_case ( ) -> tuple[float, float]: '''simple docstring''' _A = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n' _A = '\ntriplet_sum1(*dataset)\n' _A = '\ntriplet_sum2(*dataset)\n' _A = repeat(setup=UpperCamelCase_ , stmt=UpperCamelCase_ , repeat=5 , number=1_00_00 ) _A = repeat(setup=UpperCamelCase_ , stmt=UpperCamelCase_ , repeat=5 , number=1_00_00 ) return (min(UpperCamelCase_ ), min(UpperCamelCase_ )) if __name__ == "__main__": from doctest import testmod testmod() a = solution_times() print(F'''The time for naive implementation is {times[0]}.''') print(F'''The time for optimized implementation is {times[1]}.''')
315
"""simple docstring""" from math import sqrt def _A ( UpperCamelCase_ : int) -> int: '''simple docstring''' __lowercase = 0 for i in range(1, int(sqrt(UpperCamelCase_) + 1)): if n % i == 0 and i != sqrt(UpperCamelCase_): total += i + n // i elif i == sqrt(UpperCamelCase_): total += i return total - n def _A ( UpperCamelCase_ : int = 10000) -> int: '''simple docstring''' __lowercase = sum( i for i in range(1, UpperCamelCase_) if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
17
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored add_prefix_space setting
        # disagrees with the one requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
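# Usage sketch (illustration only; downloads the tokenizer files from the
# Hugging Face Hub on first use):
#
#     from transformers import GPTNeoXTokenizerFast
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tokenizer("Hello world")["input_ids"]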
338
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a = _symbol_database.Default() _a = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) _a = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a = None _a = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a = 45 _a = 15_81 _a = 15_17 _a = 15_70 _a = 15_84 _a = 17_93 _a = 17_95 _a = 19_16 _a = 18_64 _a = 19_05 _a = 19_19 _a = 24_29 _a = 22_08 _a = 24_18 _a = 23_23 _a = 24_07 # @@protoc_insertion_point(module_scope)
17
0
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a__ : List[Any] =_symbol_database.Default() a__ : Union[str, Any] =_descriptor_pool.Default().AddSerializedFile( B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) a__ : List[Any] =globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: a__ : Optional[Any] =None a__ : Union[str, Any] =B'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a__ : Any =45 a__ : Dict =1_581 a__ : Optional[int] =1_517 a__ : Dict =1_570 a__ : Tuple =1_584 a__ : Dict =1_793 a__ : Optional[Any] =1_795 a__ : Dict =1_916 a__ : Any =1_864 a__ : Tuple =1_905 a__ : Optional[Any] =1_919 a__ : Union[str, Any] =2_429 a__ : Tuple =2_208 a__ : Any =2_418 a__ : Any =2_323 a__ : Optional[Any] =2_407 # @@protoc_insertion_point(module_scope)
53
"""simple docstring""" import baseaa def _A ( UpperCamelCase_ : str) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode("utf-8")) def _A ( UpperCamelCase_ : bytes) -> str: '''simple docstring''' return baseaa.baadecode(UpperCamelCase_).decode("utf-8") if __name__ == "__main__": _a = 'Hello World!' _a = baseaa_encode(test) print(encoded) _a = baseaa_decode(encoded) print(decoded)
17
0
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __lowerCamelCase = False __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = """ybelkada/fonts""" def UpperCamelCase ( ): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ "Pix2StructImageProcessor. Please upgrade torch." ) def UpperCamelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ): requires_backends(UpperCamelCase_ , ["torch"] ) _check_torch_version() snake_case : Any = image_tensor.unsqueeze(0 ) snake_case : Any = torch.nn.functional.unfold(UpperCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case : List[Any] = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase_ , UpperCamelCase_ , -1 ) snake_case : List[str] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : int = 36 , __lowerCamelCase : str = "black" , __lowerCamelCase : str = "white" , __lowerCamelCase : int = 5 , __lowerCamelCase : int = 5 , __lowerCamelCase : int = 5 , __lowerCamelCase : int = 5 , __lowerCamelCase : Optional[bytes] = None , __lowerCamelCase : Optional[str] = None , ): requires_backends(UpperCamelCase_ , "vision" ) # Add new lines so that each line is no more than 80 characters. snake_case : Tuple = textwrap.TextWrapper(width=80 ) snake_case : Optional[int] = wrapper.wrap(text=UpperCamelCase_ ) snake_case : List[Any] = "\n".join(UpperCamelCase_ ) if font_bytes is not None and font_path is None: snake_case : List[str] = io.BytesIO(UpperCamelCase_ ) elif font_path is not None: snake_case : List[str] = font_path else: snake_case : Optional[int] = hf_hub_download(UpperCamelCase_ , "Arial.TTF" ) snake_case : Optional[int] = ImageFont.truetype(UpperCamelCase_ , encoding="UTF-8" , size=UpperCamelCase_ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case : Dict = ImageDraw.Draw(Image.new("RGB" , (1, 1) , UpperCamelCase_ ) ) snake_case , snake_case , snake_case , snake_case : Optional[int] = temp_draw.textbbox((0, 0) , UpperCamelCase_ , UpperCamelCase_ ) # Create the actual image with a bit of padding around the text. 
snake_case : str = text_width + left_padding + right_padding snake_case : Optional[Any] = text_height + top_padding + bottom_padding snake_case : Dict = Image.new("RGB" , (image_width, image_height) , UpperCamelCase_ ) snake_case : Tuple = ImageDraw.Draw(UpperCamelCase_ ) draw.text(xy=(left_padding, top_padding) , text=UpperCamelCase_ , fill=UpperCamelCase_ , font=UpperCamelCase_ ) return image def UpperCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : str , **__lowerCamelCase : Union[str, Any] ): requires_backends(UpperCamelCase_ , "vision" ) # Convert to PIL image if necessary snake_case : int = to_pil_image(UpperCamelCase_ ) snake_case : Tuple = render_text(UpperCamelCase_ , **UpperCamelCase_ ) snake_case : Any = max(header_image.width , image.width ) snake_case : str = int(image.height * (new_width / image.width) ) snake_case : Any = int(header_image.height * (new_width / header_image.width) ) snake_case : str = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case : Tuple = to_numpy_array(UpperCamelCase_ ) if infer_channel_dimension_format(UpperCamelCase_ ) == ChannelDimension.LAST: snake_case : Optional[Any] = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.LAST ) return new_image class UpperCAmelCase ( A_ ): A__ : List[Any] = ["flattened_patches"] def __init__(self : List[str] , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : int = 20_48 , snake_case__ : bool = False , **snake_case__ : Dict , ) -> int: '''simple docstring''' super().__init__(**UpperCAmelCase__ ) snake_case : Dict = patch_size if patch_size is not None else {"height": 16, "width": 16} snake_case : Any = do_normalize snake_case : Optional[int] = do_convert_rgb snake_case : int = max_patches snake_case : Dict = is_vqa def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : dict , **snake_case__ : Dict ) -> Union[str, Any]: '''simple docstring''' requires_backends(self.extract_flattened_patches , "torch" ) _check_torch_version() # convert to torch snake_case : int = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.FIRST ) snake_case : int = torch.from_numpy(UpperCAmelCase__ ) snake_case , snake_case : Any = patch_size["height"], patch_size["width"] snake_case , snake_case : List[Any] = get_image_size(UpperCAmelCase__ ) # maximize scale s.t. 
snake_case : Tuple = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) snake_case : Union[str, Any] = max(min(math.floor(scale * image_height / patch_height ) , UpperCAmelCase__ ) , 1 ) snake_case : Optional[Any] = max(min(math.floor(scale * image_width / patch_width ) , UpperCAmelCase__ ) , 1 ) snake_case : List[Any] = max(num_feasible_rows * patch_height , 1 ) snake_case : str = max(num_feasible_cols * patch_width , 1 ) snake_case : Dict = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=UpperCAmelCase__ , antialias=UpperCAmelCase__ , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case : List[Any] = torch_extract_patches(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) snake_case : Optional[Any] = patches.shape snake_case : Optional[Any] = patches_shape[1] snake_case : List[str] = patches_shape[2] snake_case : List[Any] = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case : str = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] snake_case : int = torch.arange(UpperCAmelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCAmelCase__ ).reshape([rows * columns, 1] ) snake_case : Optional[int] = torch.arange(UpperCAmelCase__ ).reshape([1, columns] ).repeat(UpperCAmelCase__ , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case : Tuple = row_ids.to(torch.floataa ) snake_case : Union[str, Any] = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case : str = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case : Any = torch.nn.functional.pad(UpperCAmelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float() snake_case : List[Any] = to_numpy_array(UpperCAmelCase__ ) return result def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Any ) -> List[Any]: '''simple docstring''' if image.dtype == np.uinta: snake_case : Union[str, Any] = image.astype(np.floataa ) # take mean across the whole `image` snake_case : str = np.mean(UpperCAmelCase__ ) snake_case : str = np.std(UpperCAmelCase__ ) snake_case : Union[str, Any] = max(UpperCAmelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , **UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : ImageInput , snake_case__ : Optional[str] = None , snake_case__ : bool = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Any , ) -> Dict: '''simple docstring''' snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize snake_case : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case : List[Any] = patch_size if patch_size is not None else self.patch_size snake_case : List[str] = max_patches if max_patches is not None else self.max_patches snake_case : List[Any] = self.is_vqa if kwargs.get("data_format" , 
UpperCAmelCase__ ) is not None: raise ValueError("data_format is not an accepted input as the outputs are " ) snake_case : Any = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case : List[str] = [convert_to_rgb(UpperCAmelCase__ ) for image in images] # All transformations expect numpy arrays. snake_case : List[Any] = [to_numpy_array(UpperCAmelCase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models." ) snake_case : Tuple = kwargs.pop("font_bytes" , UpperCAmelCase__ ) snake_case : Tuple = kwargs.pop("font_path" , UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): snake_case : Tuple = [header_text] * len(UpperCAmelCase__ ) snake_case : Optional[Any] = [ render_header(UpperCAmelCase__ , header_text[i] , font_bytes=UpperCAmelCase__ , font_path=UpperCAmelCase__ ) for i, image in enumerate(UpperCAmelCase__ ) ] if do_normalize: snake_case : List[str] = [self.normalize(image=UpperCAmelCase__ ) for image in images] # convert to torch tensor and permute snake_case : Union[str, Any] = [ self.extract_flattened_patches(image=UpperCAmelCase__ , max_patches=UpperCAmelCase__ , patch_size=UpperCAmelCase__ ) for image in images ] # create attention mask in numpy snake_case : List[str] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] snake_case : List[Any] = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=UpperCAmelCase__ ) return encoded_outputs
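# Usage sketch (illustration; requires torch and PIL, and assumes the class
# above is exported as Pix2StructImageProcessor). With the default 16x16
# patches, each flattened patch carries 2 + 16 * 16 * 3 = 770 values (row id,
# column id, then pixel data):
#
#     from transformers import Pix2StructImageProcessor
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     batch = processor(images=[pil_image], return_tensors="pt")
#     batch["flattened_patches"].shape  # -> (1, 1024, 770)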
59
"""simple docstring""" def _A ( UpperCamelCase_ : Any) -> List[str]: '''simple docstring''' __lowercase ,__lowercase = [], [] while len(UpperCamelCase_) > 1: __lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_) start.append(UpperCamelCase_) end.append(UpperCamelCase_) collection.remove(UpperCamelCase_) collection.remove(UpperCamelCase_) end.reverse() return start + collection + end if __name__ == "__main__": _a = input('Enter numbers separated by a comma:\n').strip() _a = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
17
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
13
"""simple docstring""" def _A ( UpperCamelCase_ : list[int]) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty") __lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average return sum(abs(x - average) for x in nums) / len(UpperCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
17
0
"""Solve a rat-in-a-maze problem with simple backtracking."""
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path from the top-left to the bottom-right corner, if any.

    ``maze[i][j] == 0`` marks a free cell and ``1`` marks a blocked cell.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively explore the four neighbours of cell ``(i, j)``."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0  # backtrack
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
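# A small usage sketch (illustration; the maze layout is made up). 0 marks a
# free cell and 1 a wall; a path exists along the top row and right column.
def _demo_maze() -> None:
    maze = [
        [0, 0, 0],
        [1, 1, 0],
        [0, 0, 0],
    ]
    assert solve_maze(maze) is True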
71
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ): __lowercase = parent __lowercase = vocab_size __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = (image_size // patch_size) ** 2 __lowercase = num_patches + 1 def _lowercase ( self : int ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) return config, pixel_values, labels def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ): __lowercase = FlaxBeitModel(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ): 
__lowercase = self.type_sequence_label_size __lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self : List[Any] ): __lowercase = FlaxBeitModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["pixel_values"] self.assertListEqual(arg_names[:1], UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ): self.assertEqual(jitted_output.shape, output.shape ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ ) def _A ( ) -> str: '''simple docstring''' __lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple 
docstring""" @cached_property def _lowercase ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values # prepare bool_masked_pos __lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ ) # forward pass __lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_9_6, 8_1_9_2) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) ) @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_0_0_0) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_8_1 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 2_1_8_4_1) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
17
0
'''simple docstring'''
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class _A ( __SCREAMING_SNAKE_CASE ):
    _SCREAMING_SNAKE_CASE : Tuple = "openai/whisper-base"
    _SCREAMING_SNAKE_CASE : Union[str, Any] = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _SCREAMING_SNAKE_CASE : List[str] = "transcriber"
    _SCREAMING_SNAKE_CASE : Optional[Any] = WhisperProcessor
    _SCREAMING_SNAKE_CASE : str = WhisperForConditionalGeneration
    _SCREAMING_SNAKE_CASE : List[str] = ["audio"]
    _SCREAMING_SNAKE_CASE : Tuple = ["text"]

    def __A ( self , __UpperCAmelCase ) -> int:
        '''simple docstring'''
        return self.pre_processor(UpperCAmelCase__ , return_tensors="""pt""" ).input_features

    def __A ( self , __UpperCAmelCase ) -> Dict:
        '''simple docstring'''
        return self.model.generate(inputs=UpperCAmelCase__ )

    def __A ( self , __UpperCAmelCase ) -> int:
        '''simple docstring'''
        return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0]
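A hedged usage sketch of the encode -> forward -> decode pattern in the cell above: it mirrors the three methods with the public transformers API and the "openai/whisper-base" checkpoint named in the class attribute. The one-second silent waveform is a stand-in assumption for illustration, not part of the dataset row.

import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # stand-in input: one second of 16 kHz silence
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features  # encode
generated = model.generate(inputs=features)  # forward, as in the tool's middle method
text = processor.batch_decode(generated, skip_special_tokens=True)[0]  # decode
print(text)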
254
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase ,lowercase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase = load_tool("text-classification" ) self.tool.setup() __lowercase = load_tool("text-classification", remote=UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : str ): __lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : List[str] ): __lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : Tuple ): __lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" )
17
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class UpperCamelCase_ : @staticmethod def _lowercase( *A , **A ) -> int: pass def __lowerCamelCase ( _lowercase ) -> Any: return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. a : Dict = ( """https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png""" ) @is_pipeline_test @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : int = pipeline( """document-question-answering""" , model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ ) UpperCAmelCase : Optional[int] = INVOICE_URL UpperCAmelCase : Dict = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) ) UpperCAmelCase : Optional[Any] = """What is the placebo?""" UpperCAmelCase : Dict = [ { """image""": load_image(UpperCAmelCase__ ), """question""": question, }, { """image""": image, """question""": question, }, { """image""": image, """question""": question, """word_boxes""": word_boxes, }, ] return dqa_pipeline, examples def _lowercase( self , A , A ) -> Union[str, Any]: UpperCAmelCase : int = dqa_pipeline(UpperCAmelCase__ , top_k=2 ) self.assertEqual( UpperCAmelCase__ , [ [ {"""score""": ANY(UpperCAmelCase__ ), """answer""": ANY(UpperCAmelCase__ ), """start""": ANY(UpperCAmelCase__ ), """end""": ANY(UpperCAmelCase__ )}, {"""score""": ANY(UpperCAmelCase__ ), """answer""": ANY(UpperCAmelCase__ ), """start""": ANY(UpperCAmelCase__ ), """end""": ANY(UpperCAmelCase__ )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" ) UpperCAmelCase : Any = INVOICE_URL UpperCAmelCase : List[Any] = """How many cats are there?""" UpperCAmelCase : int = [ {"""score""": 0.0_0_0_1, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39}, {"""score""": 0.0_0_0_1, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40}, ] UpperCAmelCase : List[str] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ ) UpperCAmelCase : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably UpperCAmelCase : List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" UpperCAmelCase : int = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(UpperCAmelCase__ , [] ) # We can optionnally pass directly the words and bounding boxes UpperCAmelCase : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png""" UpperCAmelCase : List[str] = [] UpperCAmelCase : Tuple = [] UpperCAmelCase : Dict = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , words=UpperCAmelCase__ , boxes=UpperCAmelCase__ , top_k=2 ) self.assertEqual(UpperCAmelCase__ , [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase( self ) -> List[Any]: UpperCAmelCase : Tuple = pipeline( """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , ) UpperCAmelCase : Optional[Any] = INVOICE_URL UpperCAmelCase : Optional[Any] = """What is the invoice number?""" UpperCAmelCase : Tuple = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) UpperCAmelCase : Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) UpperCAmelCase : Any = dqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.9_9_4_4, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_0_0_9, """answer""": """us-001""", """start""": 16, """end""": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = pipeline( """document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , ) UpperCAmelCase : Dict = INVOICE_URL UpperCAmelCase : Optional[Any] = """What is the invoice number?""" UpperCAmelCase : Tuple = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23}, {"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) UpperCAmelCase : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23}, {"""score""": 0.9_9_4_8, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) UpperCAmelCase : int = dqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.9_9_7_4, """answer""": """1110212019""", """start""": 23, """end""": 23}, {"""score""": 0.9_9_4_8, """answer""": 
"""us-001""", """start""": 16, """end""": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def _lowercase( self ) -> str: UpperCAmelCase : str = AutoTokenizer.from_pretrained( """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase__ ) UpperCAmelCase : Union[str, Any] = pipeline( """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase__ , revision="""3dc6de3""" , ) UpperCAmelCase : int = INVOICE_URL UpperCAmelCase : Dict = """What is the invoice number?""" UpperCAmelCase : List[str] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23}, ] , ) UpperCAmelCase : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23}, ] , ) UpperCAmelCase : Optional[int] = dqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23}, ] ] * 2 , ) UpperCAmelCase : Optional[int] = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) ) # This model should also work if `image` is set to None UpperCAmelCase : int = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.4_2_5_1, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.0_8_1_9, """answer""": """1110212019""", """start""": 23, """end""": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def _lowercase( self ) -> int: UpperCAmelCase : str = AutoTokenizer.from_pretrained( """impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase__ ) UpperCAmelCase : List[Any] = pipeline( """document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase__ , revision="""3dc6de3""" , max_seq_len=50 , ) UpperCAmelCase : List[str] = INVOICE_URL UpperCAmelCase : List[str] = """What is the invoice number?""" UpperCAmelCase : List[Any] = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) UpperCAmelCase : List[str] = dqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16}, ] ] * 2 , ) 
UpperCAmelCase : Any = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ) , UpperCAmelCase__ , """""" ) ) ) # This model should also work if `image` is set to None UpperCAmelCase : Tuple = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {"""score""": 0.9_9_9_9, """answer""": """us-001""", """start""": 16, """end""": 16}, {"""score""": 0.9_9_9_8, """answer""": """us-001""", """start""": 16, """end""": 16}, ] , ) @slow @require_torch def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = pipeline( """document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , ) UpperCAmelCase : Optional[Any] = INVOICE_URL UpperCAmelCase : Union[str, Any] = """What is the invoice number?""" UpperCAmelCase : str = dqa_pipeline(image=UpperCAmelCase__ , question=UpperCAmelCase__ , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"""answer""": """us-001"""}] ) @require_tf @unittest.skip("""Document question answering not implemented in TF""" ) def _lowercase( self ) -> int: pass
265
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _a = 'CompVis/stable-diffusion-v1-1' _a = 'CompVis/stable-diffusion-v1-2' _a = 'CompVis/stable-diffusion-v1-3' _a = 'CompVis/stable-diffusion-v1-4' class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ): super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def _lowercase ( self : List[str] ): return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )} def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): self.enable_attention_slicing(UpperCAmelCase__ ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, 
UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, 
UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): __lowercase = "cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
17
0
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" A__ = [] for _ in range(UpperCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=10 ): """simple docstring""" A__ = [] for step in range(UpperCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ = os.path.join(UpperCamelCase_ , 'schedule.bin' ) torch.save(scheduler.state_dict() , UpperCamelCase_ ) A__ = torch.load(UpperCamelCase_ ) scheduler.load_state_dict(UpperCamelCase_ ) return lrs @require_torch class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> int: self.assertEqual(len(UpperCAmelCase__ ) ,len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ ,UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,delta=UpperCAmelCase__ ) def snake_case__ ( self ) -> str: A__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCAmelCase__ ) A__ = torch.tensor([0.4, 0.2, -0.5] ) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = AdamW(params=[w] ,lr=2e-1 ,weight_decay=0.0 ) for _ in range(1_00 ): A__ = criterion(UpperCAmelCase__ ,UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 ) def snake_case__ ( self ) -> str: A__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=UpperCAmelCase__ ) A__ = torch.tensor([0.4, 0.2, -0.5] ) A__ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ = Adafactor( params=[w] ,lr=1e-2 ,eps=(1e-30, 1e-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=UpperCAmelCase__ ,weight_decay=0.0 ,relative_step=UpperCAmelCase__ ,scale_parameter=UpperCAmelCase__ ,warmup_init=UpperCAmelCase__ ,) for _ in range(10_00 ): A__ = criterion(UpperCAmelCase__ ,UpperCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 ) @require_torch class UpperCamelCase__( unittest.TestCase ): lowerCAmelCase__ : Any = nn.Linear(50 , 50 ) if is_torch_available() else None lowerCAmelCase__ : Optional[Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None lowerCAmelCase__ : Tuple = 10 def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Optional[int]: self.assertEqual(len(UpperCAmelCase__ ) ,len(UpperCAmelCase__ ) ) for a, b in zip(UpperCAmelCase__ ,UpperCAmelCase__ ): self.assertAlmostEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,delta=UpperCAmelCase__ ,msg=UpperCAmelCase__ ) def snake_case__ ( self ) -> Optional[int]: A__ = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): A__ , A__ = data A__ = scheduler_func(self.optimizer ,**UpperCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 ) A__ = unwrap_schedule(UpperCAmelCase__ ,self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase__ ,UpperCAmelCase__ ,tol=1e-2 ,msg=f'''failed for {scheduler_func} in normal scheduler''' ,) A__ = scheduler_func(self.optimizer ,**UpperCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase__ ) # wrap to test picklability of the schedule A__ = unwrap_and_save_reload_schedule(UpperCAmelCase__ ,self.num_steps ) self.assertListEqual(UpperCAmelCase__ ,UpperCAmelCase__ ,msg=f'''failed for {scheduler_func} in save and reload''' ) class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ) -> List[str]: A__ = fn def __call__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str: return self.fn(*UpperCAmelCase__ ,**UpperCAmelCase__ ) @classmethod def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]: A__ = list(map(self ,scheduler.lr_lambdas ) )
221
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ): __lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) ) __lowercase = torch.manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowercase ( self : Optional[Any] ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : int ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : str ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) 
__lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Dict ): __lowercase = ort.SessionOptions() __lowercase = False return options def _lowercase ( self : Dict ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowercase ( self : str ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) __lowercase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] 
) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
17
0
def lowerCAmelCase__( lowercase : list[list[int]] , lowercase : int , lowercase : int , lowercase : list[int] ) -> bool:
    # 1. Validate that the current and next vertices are connected in the graph
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )


def lowerCAmelCase__( lowercase : list[list[int]] , lowercase : list[int] , lowercase : int ) -> bool:
    # Base Case
    if curr_ind == len(UpperCamelCase_ ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0 , len(UpperCamelCase_ ) ):
        if valid_connection(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
            # Insert current vertex into path as next transition
            __snake_case : Optional[int] = next_ver
            # Validate created path
            if util_hamilton_cycle(UpperCamelCase_ , UpperCamelCase_ , curr_ind + 1 ):
                return True
            # Backtrack
            __snake_case : List[Any] = -1
    return False


def lowerCAmelCase__( lowercase : list[list[int]] , lowercase : int = 0 ) -> list[int]:
    __snake_case : List[str] = [-1] * (len(UpperCamelCase_ ) + 1)
    # initialize start and end of path with starting index
    __snake_case : Optional[Any] = start_index
    # evaluate the candidate path; if we find an answer return it, otherwise return an empty list
    return path if util_hamilton_cycle(UpperCamelCase_ , UpperCamelCase_ , 1 ) else []
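A hedged usage sketch for the backtracking search above. The obfuscated binding names make the cell itself non-runnable, so this assumes the readable signature the body still references: hamilton_cycle(graph, start_index=0), returning the vertex sequence, or [] when no Hamiltonian cycle exists.

# 5-vertex adjacency matrix that contains a Hamiltonian cycle.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]: ends where it starts, visiting every other vertex once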
326
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _a = datasets.utils.logging.get_logger(__name__) _a = ['names', 'prefix'] _a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] _a = ['encoding_errors', 'on_bad_lines'] _a = ['date_format'] @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : str = "," __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer" __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None __UpperCAmelCase : Optional[Union[List[int], List[str]]] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None __UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[Union[int, List[int]]] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[Union[str, List[str]]] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = "." __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = '"' __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : int = 0 __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : int = 1_0_0_0_0 __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : Optional[str] = "strict" __UpperCAmelCase : Literal["error", "warn", "skip"] = "error" __UpperCAmelCase : Optional[str] = None def _lowercase ( self : Tuple ): if self.delimiter is not None: __lowercase = self.delimiter if self.column_names is not None: __lowercase = self.column_names @property def _lowercase ( self : Union[str, Any] ): __lowercase = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, 
"memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _lowerCAmelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCAmelCase : Tuple = CsvConfig def _lowercase ( self : List[str] ): return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __lowercase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__, (str, list, tuple) ): __lowercase = data_files if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )] __lowercase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) ) return splits def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ): if self.config.features is not None: __lowercase = self.config.features.arrow_schema if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast __lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ ) return pa_table def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ): __lowercase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __lowercase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): __lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(UpperCAmelCase__ ): __lowercase = pa.Table.from_pandas(UpperCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # 
logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" ) raise
17
0
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowercase ( UpperCamelCase__,unittest.TestCase ):
    _a = FunnelTokenizer
    _a = FunnelTokenizerFast
    _a = True
    _a = True

    def a__ ( self ) -> Optional[int]:
        super().setUp()
        _A : Tuple = [
            """<unk>""",
            """<cls>""",
            """<sep>""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        _A : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def a__ ( self , **_a ) -> Union[str, Any]:
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def a__ ( self , **_a ) -> int:
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def a__ ( self , _a ) -> int:
        _A : Tuple = """UNwant\u00E9d,running"""
        _A : Optional[Any] = """unwanted, running"""
        return input_text, output_text

    def a__ ( self ) -> Optional[int]:
        _A : Tuple = self.tokenizer_class(self.vocab_file )
        _A : Union[str, Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(UpperCAmelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [7, 4, 5, 10, 8, 9] )

    def a__ ( self ) -> int:
        _A : Union[str, Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
        for tokenizer in tokenizers:
            _A : Union[str, Any] = tokenizer("""UNwant\u00E9d,running""" )
            _A : int = len(inputs["""input_ids"""] ) - 1
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
            _A : int = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
26
"""simple docstring""" from scipy.stats import spearmanr import datasets _a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' _a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' _a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=False ): __lowercase = spearmanr(UpperCAmelCase__, UpperCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
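As a quick cross-check outside the metric wrapper, the underlying scipy.stats.spearmanr call that the compute method in the cell above delegates to can be used directly; it reproduces the -0.7 correlation and 0.19 p-value from the docstring example.

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2), round(pvalue, 2))  # -0.7 0.19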
17
0
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: a = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : str=7 , _UpperCAmelCase : int=3 , _UpperCAmelCase : List[Any]=18 , _UpperCAmelCase : Optional[Any]=30 , _UpperCAmelCase : Optional[int]=400 , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=None , ): _A = size if size is not None else {'height': 20, 'width': 20} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = size _A = do_normalize _A = do_convert_rgb _A = [512, 1_024, 2_048, 4_096] _A = patch_size if patch_size is not None else {'height': 16, 'width': 16} def lowerCAmelCase_ ( self : Tuple ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowerCAmelCase_ ( self : Any ): _A = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' _A = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Union[str, Any] ): _A = PixaStructImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : int ): _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_convert_rgb' ) ) def lowerCAmelCase_ ( self : int ): _A = self.image_processor_tester.prepare_dummy_image() _A = self.image_processing_class(**self.image_processor_dict ) _A = 2_048 _A = image_processor(UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def lowerCAmelCase_ ( self : Dict ): # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , 
(1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Tuple ): # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 _A = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(UpperCAmelCase__ ): _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches _A = 'Hello' _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Any ): # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) _A = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Dict ): # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches 
self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[Any] = PixaStructImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : str ): _A = PixaStructImageProcessingTester(self , num_channels=4 ) _A = 3 @property def lowerCAmelCase_ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : str ): _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , 'do_convert_rgb' ) ) def lowerCAmelCase_ ( self : List[Any] ): # Initialize image_processor _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input _A = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _A = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _A = image_processor( UpperCAmelCase__ , return_tensors='pt' , max_patches=UpperCAmelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
315
"""simple docstring""" from collections.abc import Sequence def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_)) def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float: '''simple docstring''' __lowercase = 0.0 for coeff in reversed(UpperCamelCase_): __lowercase = result * x + coeff return result if __name__ == "__main__": _a = (0.0, 0.0, 5.0, 9.3, 7.0) _a = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
17
0
from ...configuration_utils import PretrainedConfig lowercase__ : int = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class lowercase_ ( UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = "tapas" def __init__( self , __SCREAMING_SNAKE_CASE=30522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=[3, 256, 256, 2, 256, 256, 10] , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1_0.0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="ratio" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->List[Any]: super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_sizes lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps # Fine-tuning task hyperparameters lowerCAmelCase = positive_label_weight lowerCAmelCase = num_aggregation_labels lowerCAmelCase = aggregation_loss_weight lowerCAmelCase = use_answer_as_supervision lowerCAmelCase = answer_loss_importance lowerCAmelCase = use_normalized_answer_loss lowerCAmelCase = huber_loss_delta lowerCAmelCase = temperature lowerCAmelCase = aggregation_temperature lowerCAmelCase = use_gumbel_for_cells lowerCAmelCase = use_gumbel_for_aggregation lowerCAmelCase = average_approximation_function lowerCAmelCase = cell_selection_preference lowerCAmelCase = answer_loss_cutoff lowerCAmelCase = max_num_rows lowerCAmelCase = max_num_columns lowerCAmelCase = average_logits_per_cell lowerCAmelCase = select_one_column lowerCAmelCase = allow_empty_column_selection lowerCAmelCase = init_cell_selection_weights_to_zero lowerCAmelCase = reset_position_index_per_cell lowerCAmelCase = disable_per_token_loss # Aggregation hyperparameters lowerCAmelCase = aggregation_labels lowerCAmelCase = no_aggregation_label_index if 
isinstance(self.aggregation_labels , dict ): lowerCAmelCase = {int(k ): v for k, v in aggregation_labels.items()}
338
"""simple docstring""" import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowerCAmelCase ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[Any], UpperCAmelCase__ : str ): super().__init__() __lowercase = model __lowercase = 2 __lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels ) def _lowercase ( self : Optional[int] ): pass def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = LongformerModel.from_pretrained(UpperCamelCase_) __lowercase = LightningModel(UpperCamelCase_) __lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu")) lightning_model.load_state_dict(ckpt["state_dict"]) # init longformer question answering model __lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCamelCase_) print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
17
0
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # The base class was left undefined and the warning category was
        # garbled; SegformerImageProcessor is the only processor imported here.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
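A short sanity sketch for the shim above, assuming it runs inside a transformers source tree so the relative imports resolve: constructing the class should emit the FutureWarning.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    SegformerFeatureExtractor()  # still constructs, but warns
    assert any(issubclass(w.category, FutureWarning) for w in caught)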
53
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, ) assert hasattr(self, "env" ) def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ): # configuration for running training on smdistributed Model Parallel __lowercase = { "enabled": True, "processes_per_host": 8, } __lowercase = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } __lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} __lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 5_0_0, }, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", ) def _lowercase ( self : Tuple, UpperCAmelCase__ : int ): TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ): # create estimator __lowercase = self.create_estimator(UpperCAmelCase__ ) # run training estimator.fit() # result dataframe __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests 
result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile )
17
0
"""Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""

from math import pow, sqrt


def validate(*values: float) -> bool:
    # Graham's law only applies to strictly positive rates and molar masses.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
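A quick numeric check of the reconstructed helpers above (a minimal sketch; the molar masses are illustrative round values, and the roughly 4x figure follows from sqrt(32 / 2) = 4):

if __name__ == "__main__":
    # Hydrogen (~2 g/mol) effuses about 4x faster than oxygen (~32 g/mol),
    # since sqrt(32 / 2) = 4; using 2.016 g/mol gives ~3.9841.
    print(effusion_ratio(2.016, 32.0))
    # Rate of the lighter gas, given the heavier gas effusing at 1.0.
    print(first_effusion_rate(1.0, 2.016, 32.0))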
59
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
17
0
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        # Inserting in descending order at the head leaves the list ascending.
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(
    sll_one: SortedLinkedList, sll_two: SortedLinkedList
) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
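A small spot check of merge_lists (a sketch; the expected rendering follows from sorting the concatenated values):

# Could live in the __main__ block above: both operands are sorted on
# construction, and the merge re-sorts their concatenation.
assert str(merge_lists(SortedLinkedList((3, 1)), SortedLinkedList((4, 2)))) == "1 -> 2 -> 3 -> 4"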
13
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( 
feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, 
keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) 
and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
17
0
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A : """simple docstring""" @staticmethod def __lowercase ( *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class __A ( unittest.TestCase ): """simple docstring""" UpperCamelCase__ : str =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : str =pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) __UpperCamelCase : Union[str, Any] =[ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] return object_detector, examples def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[Any] =object_detector(examples[0] , threshold=0.0 ) __UpperCamelCase : List[Any] =len(UpperCAmelCase__ ) self.assertGreater(UpperCAmelCase__ , 0 ) self.assertEqual( UpperCAmelCase__ , [ { 'score': ANY(UpperCAmelCase__ ), 'label': ANY(UpperCAmelCase__ ), 'box': {'xmin': ANY(UpperCAmelCase__ ), 'ymin': ANY(UpperCAmelCase__ ), 'xmax': ANY(UpperCAmelCase__ ), 'ymax': ANY(UpperCAmelCase__ )}, } for i in range(UpperCAmelCase__ ) ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def __lowercase ( self ): """simple docstring""" pass @require_torch def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' ) __UpperCamelCase : List[str] =object_detector( './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}}, {'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, ] , ) __UpperCamelCase : str =object_detector( [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 
0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}}, {'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}}, {'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}}, {'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}}, ] ] , ) @require_torch @slow def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =pipeline('zero-shot-object-detection' ) __UpperCamelCase : Dict =object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ] , ) __UpperCamelCase : Optional[Any] =object_detector( [ { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, ] , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ [ {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ], [ {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}}, {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}, ], ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF' ) def __lowercase ( self ): """simple docstring""" pass @require_torch @slow def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =0.2 __UpperCamelCase : Optional[int] =pipeline('zero-shot-object-detection' ) __UpperCamelCase : Tuple =object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=UpperCAmelCase__ , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.277, 'label': 
'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, ] , ) @require_torch @slow def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[Any] =2 __UpperCamelCase : str =pipeline('zero-shot-object-detection' ) __UpperCamelCase : List[Any] =object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=UpperCAmelCase__ , ) self.assertEqual( nested_simplify(UpperCAmelCase__ , decimals=4 ) , [ {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}}, ] , )
71
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : Tuple = XGLMConfig __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Union[str, Any] = "gelu" def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = d_model __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = ffn_dim __lowercase = activation_function __lowercase = activation_dropout __lowercase = attention_dropout __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = None __lowercase = 0 __lowercase = 2 __lowercase = 1 def _lowercase ( self : Union[str, Any] ): return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Tuple ): __lowercase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = self.get_config() __lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self : List[Any] ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if 
is_tf_available() else () __UpperCAmelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def _lowercase ( self : Optional[Any] ): __lowercase = TFXGLMModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @slow def _lowercase ( self : List[str] ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self : int ): super().test_resize_token_embeddings() @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[Any] ): __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __lowercase = tokenizer("Today is a nice day and", return_tensors="tf" ) __lowercase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] ) __lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = "left" # use different length sentences to test batching __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ ) __lowercase = inputs["input_ids"] __lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
17
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[List[PIL.Image.Image], np.ndarray] _SCREAMING_SNAKE_CASE : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('''>=''', '''0.0.12''') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import 
StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : np.ndarray _SCREAMING_SNAKE_CASE : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
254
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
17
0
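# Hypothetical usage sketch for the fixtures above (not part of the upstream
# module): ``temporary_repo`` guarantees the CI repo is deleted even when the
# test body raises. The test name and assertions are illustrative only.
def test_create_private_repo(temporary_repo, hf_api, hf_token):
    repo_id = f"{CI_HUB_USER}/tmp-repo-{int(time.time() * 10e3)}"
    with temporary_repo(repo_id):
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
        assert repo_id.startswith(CI_HUB_USER)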
'''simple docstring'''
def jaro_winkler(str_a: str, str_b: str) -> float:
    def get_matched_characters(_str_a: str, _str_b: str) -> str:
        matched = []
        limit = min(len(_str_a), len(_str_b)) // 2
        for i, l in enumerate(_str_a):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_b)))
            if l in _str_b[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str_b = f"{_str_b[0:_str_b.index(l)]} {_str_b[_str_b.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(c_a, c_b) for c_a, c_b in zip(matching_a, matching_b) if c_a != c_b]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c_a, c_b in zip(str_a[:4], str_b[:4]):
        if c_a == c_b:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
265
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : int = "time_series_transformer" __UpperCAmelCase : Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ): # time series specific configuration __lowercase = prediction_length __lowercase = context_length or prediction_length __lowercase = distribution_output __lowercase = loss __lowercase = input_size __lowercase = num_time_features __lowercase = lags_sequence __lowercase = scaling __lowercase = num_dynamic_real_features __lowercase = num_static_real_features __lowercase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __lowercase = cardinality else: __lowercase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __lowercase = embedding_dimension else: __lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality] __lowercase = num_parallel_samples # Transformer architecture configuration __lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features __lowercase = d_model __lowercase = encoder_attention_heads __lowercase = decoder_attention_heads __lowercase = encoder_ffn_dim __lowercase = decoder_ffn_dim __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = activation_function __lowercase = init_std __lowercase = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ ) @property def _lowercase ( self : Optional[Any] ): 
return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
17
0
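# Quick sanity check for the Jaro-Winkler implementation above. For "hello" vs.
# "world" there is one matched character ("l"), no transpositions and no common
# prefix, so jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666...
if __name__ == "__main__":
    assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-9
    assert jaro_winkler("hello", "hello") == 1.0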
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=6 ,__UpperCAmelCase=17 ,__UpperCAmelCase=23 ,__UpperCAmelCase=11 ,__UpperCAmelCase=True ,) -> Optional[int]: A__ = parent A__ = batch_size A__ = seq_length A__ = act_dim A__ = state_dim A__ = hidden_size A__ = max_length A__ = is_training def snake_case__ ( self ) -> int: A__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) A__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) A__ = floats_tensor((self.batch_size, self.seq_length, 1) ) A__ = floats_tensor((self.batch_size, self.seq_length, 1) ) A__ = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=10_00 ) A__ = random_attention_mask((self.batch_size, self.seq_length) ) A__ = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def snake_case__ ( self ) -> Optional[int]: return DecisionTransformerConfig( batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> str: A__ = DecisionTransformerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() A__ = model(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ) self.parent.assertEqual(result.state_preds.shape ,states.shape ) self.parent.assertEqual(result.action_preds.shape ,actions.shape ) self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def snake_case__ ( self ) -> Any: A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = { 'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask, } return config, inputs_dict @require_torch class UpperCamelCase__( __A , __A , __A , unittest.TestCase ): lowerCAmelCase__ : Tuple = (DecisionTransformerModel,) if is_torch_available() else () lowerCAmelCase__ : int = () lowerCAmelCase__ : Tuple = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids lowerCAmelCase__ : str = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features lowerCAmelCase__ : str = False lowerCAmelCase__ : str = False 
lowerCAmelCase__ : int = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : Union[str, Any] = False lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : Optional[int] = False def snake_case__ ( self ) -> List[str]: A__ = DecisionTransformerModelTester(self ) A__ = ConfigTester(self ,config_class=UpperCAmelCase__ ,hidden_size=37 ) def snake_case__ ( self ) -> int: self.config_tester.run_common_tests() def snake_case__ ( self ) -> Tuple: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) @slow def snake_case__ ( self ) -> Optional[Any]: for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = DecisionTransformerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def snake_case__ ( self ) -> Tuple: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCAmelCase__ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = [ 'states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask', ] self.assertListEqual(arg_names[: len(UpperCAmelCase__ )] ,UpperCAmelCase__ ) @require_torch class UpperCamelCase__( unittest.TestCase ): @slow def snake_case__ ( self ) -> Tuple: A__ = 2 # number of steps of autoregressive prediction we will perform A__ = 10 # defined by the RL environment, may be normalized A__ = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' ) A__ = model.to(UpperCAmelCase__ ) A__ = model.config torch.manual_seed(0 ) A__ = torch.randn(1 ,1 ,config.state_dim ).to(device=UpperCAmelCase__ ,dtype=torch.floataa ) # env.reset() A__ = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] ,device=UpperCAmelCase__ ) A__ = torch.tensor(UpperCAmelCase__ ,device=UpperCAmelCase__ ,dtype=torch.floataa ).reshape(1 ,1 ,1 ) A__ = state A__ = torch.zeros(1 ,0 ,config.act_dim ,device=UpperCAmelCase__ ,dtype=torch.floataa ) A__ = torch.zeros(1 ,0 ,device=UpperCAmelCase__ ,dtype=torch.floataa ) A__ = torch.tensor(0 ,device=UpperCAmelCase__ ,dtype=torch.long ).reshape(1 ,1 ) for step in range(UpperCAmelCase__ ): A__ = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=UpperCAmelCase__ )] ,dim=1 ) A__ = torch.cat([rewards, torch.zeros(1 ,1 ,device=UpperCAmelCase__ )] ,dim=1 ) A__ = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device ) with torch.no_grad(): A__ , A__ , A__ = model( states=UpperCAmelCase__ ,actions=UpperCAmelCase__ ,rewards=UpperCAmelCase__ ,returns_to_go=UpperCAmelCase__ ,timesteps=UpperCAmelCase__ ,attention_mask=UpperCAmelCase__ ,return_dict=UpperCAmelCase__ ,) self.assertEqual(action_pred.shape ,actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1e-4 ) ) A__ , A__ , A__ , A__ = ( # env.step(action) torch.randn(1 ,1 ,config.state_dim ).to(device=UpperCAmelCase__ ,dtype=torch.floataa ), 1.0, False, {}, ) A__ = action_pred[0, -1] A__ = torch.cat([states, state] ,dim=1 ) A__ = returns_to_go[0, -1] - reward A__ = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 ) A__ = torch.cat( [timesteps, torch.ones((1, 1) ,device=UpperCAmelCase__ ,dtype=torch.long ) * (step + 1)] ,dim=1 )
221
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ): pass def _A ( UpperCamelCase_ : Union[str, Any]) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. _a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ): __lowercase = pipeline( "document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = INVOICE_URL __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) __lowercase = "What is the placebo?" __lowercase = [ { "image": load_image(UpperCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 ) self.assertEqual( UpperCAmelCase__, [ [ {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, ] ] * 3, ) @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" ) __lowercase = INVOICE_URL __lowercase = "How many cats are there?" __lowercase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) # We can optionnally pass directly the words and bounding boxes __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = [] __lowercase = [] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : List[str] ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2, ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Optional[Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Union[str, Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowercase ( self : List[Any] ): pass
17
0
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _UpperCamelCase = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> List[Any]: '''simple docstring''' __snake_case : Optional[Any] = tokenizer __snake_case : Any = dataset __snake_case : Optional[int] = len(UpperCAmelCase__ ) if n_tasks is None else n_tasks __snake_case : List[Any] = n_copies def __iter__( self ) -> Tuple: '''simple docstring''' __snake_case : Any = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) __snake_case : Dict = self.tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _lowerCamelCase ( a ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __snake_case : str = start_length __snake_case : List[str] = eof_strings __snake_case : str = tokenizer def __call__( self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[str]: '''simple docstring''' __snake_case : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) __snake_case : List[Any] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(UpperCAmelCase__ ) def lowerCAmelCase__( lowercase : List[Any] ) -> int: __snake_case : List[str] = re.split("(%s)" % "|".join(UpperCamelCase_ ) , UpperCamelCase_ ) # last string should be "" return "".join(string_list[:-2] ) def lowerCAmelCase__( lowercase : Dict , lowercase : Dict , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Any]=20 , **lowercase : int ) -> int: __snake_case : List[Any] = defaultdict(UpperCamelCase_ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(UpperCamelCase_ ) ): with torch.no_grad(): __snake_case : Optional[Any] = batch["ids"].shape[-1] __snake_case : int = accelerator.unwrap_model(UpperCamelCase_ ).generate( input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=UpperCamelCase_ , **UpperCamelCase_ ) # each task is generated batch_size times __snake_case : Dict = batch["task_id"].repeat(UpperCamelCase_ ) __snake_case : List[str] = accelerator.pad_across_processes( UpperCamelCase_ , dim=1 , pad_index=tokenizer.pad_token_id ) __snake_case , __snake_case : Tuple = accelerator.gather((generated_tokens, generated_tasks) ) __snake_case : Tuple = generated_tokens.cpu().numpy() __snake_case : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(UpperCamelCase_ , UpperCamelCase_ ): gen_token_dict[task].append(UpperCamelCase_ ) __snake_case : 
Dict = [[] for _ in range(UpperCamelCase_ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: __snake_case : Optional[int] = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) code_gens[task].append(remove_last_block(UpperCamelCase_ ) ) return code_gens def lowerCAmelCase__( ) -> Dict: __snake_case : Any = HfArgumentParser(UpperCamelCase_ ) __snake_case : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric __snake_case : Tuple = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing __snake_case : Optional[int] = "false" if args.num_workers is None: __snake_case : Optional[int] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate __snake_case : str = Accelerator() set_seed(args.seed , device_specific=UpperCamelCase_ ) # Load model and tokenizer __snake_case : str = AutoTokenizer.from_pretrained(args.model_ckpt ) __snake_case : Optional[Any] = tokenizer.eos_token __snake_case : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings __snake_case : Dict = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase_ , UpperCamelCase_ )] ), } # Load evaluation dataset and metric __snake_case : Tuple = load_dataset("openai_humaneval" ) __snake_case : List[str] = load_metric("code_eval" ) __snake_case : List[str] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) __snake_case : int = args.n_samples // args.batch_size __snake_case : Optional[int] = TokenizedDataset(UpperCamelCase_ , human_eval["test"] , n_copies=UpperCamelCase_ , n_tasks=UpperCamelCase_ ) # do not confuse args.batch_size, which is actually the num_return_sequences __snake_case : Any = DataLoader(UpperCamelCase_ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: __snake_case : Dict = code_eval_metric.compute(references=[""] , predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." 
) raise exception __snake_case , __snake_case : Tuple = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ ) __snake_case : Dict = complete_code( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , n_tasks=UpperCamelCase_ , batch_size=args.batch_size , **UpperCamelCase_ , ) if accelerator.is_main_process: __snake_case : List[Any] = [] for task in tqdm(range(UpperCamelCase_ ) ): __snake_case : Tuple = human_eval["test"][task]["test"] __snake_case : Optional[Any] = f"""check({human_eval["test"][task]["entry_point"]})""" references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric __snake_case , __snake_case : int = code_eval_metric.compute( references=UpperCamelCase_ , predictions=UpperCamelCase_ , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , "w" ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
326
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a = 2 class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict, *, # begin keyword-only arguments UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ): __lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCAmelCase__ ) __lowercase = len(self.symbols ) def __eq__( self : List[str], UpperCAmelCase__ : Dict ): return self.indices == other.indices def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ): return len(self.symbols ) def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ): return sym in self.indices @classmethod def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = cls() d.add_from_file(UpperCAmelCase__ ) return d def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ): if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(UpperCAmelCase__ ) self.count.append(UpperCAmelCase__ ) return idx def _lowercase ( self : Any, UpperCAmelCase__ : str ): return 0 def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): try: with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd: self.add_from_file(UpperCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(UpperCAmelCase__ ) for line in lines[indices_start_line:]: try: __lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase ,__lowercase = line.rsplit(" ", 1 ) else: __lowercase = False __lowercase = int(UpperCAmelCase__ ) __lowercase = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(UpperCAmelCase__ ) ) self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _A ( UpperCamelCase_ : int) -> str: '''simple docstring''' __lowercase = dict((re.sub(r"@@$", "", UpperCamelCase_), v) if k.endswith("@@") else (re.sub(r"$", "</w>", UpperCamelCase_), v) for k, v in d.items()) __lowercase = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowercase = d[k] # restore return da def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]: '''simple docstring''' if not os.path.exists(UpperCamelCase_): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""") os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_) print(F"""Writing results to {pytorch_dump_folder_path}""") # handle various types of models __lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""") __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = chkpt["cfg"]["model"] # dicts __lowercase = os.path.join(UpperCamelCase_, "dict.txt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {dict_file} does not exist!""") __lowercase = Dictionary.load(UpperCamelCase_) __lowercase = rewrite_dict_keys(src_dict.indices) __lowercase = len(UpperCamelCase_) __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"]) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # merges_file (bpecodes) __lowercase = os.path.join(UpperCamelCase_, "bpecodes") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""") __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(UpperCamelCase_, UpperCamelCase_) # model config __lowercase = os.path.join(UpperCamelCase_, "config.json") __lowercase = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # tokenizer config __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) __lowercase = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": 
None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # model __lowercase = chkpt["model"] # remove unneeded keys __lowercase = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase_, UpperCamelCase_) __lowercase = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): __lowercase = model_state_dict.pop(UpperCamelCase_) else: __lowercase = model_state_dict.pop(UpperCamelCase_) __lowercase = BioGptConfig.from_pretrained(UpperCamelCase_) __lowercase = BioGptForCausalLM(UpperCamelCase_) # check that it loads ok model_new.load_state_dict(UpperCamelCase_) # save __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) print(F"""Generating {pytorch_weights_dump_path}""") torch.save(UpperCamelCase_, UpperCamelCase_) print("Conversion is done!") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
17
0
def and_gate(input_a: int, input_b: int) -> int:
    # The output is 1 only when neither input is 0.
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
26
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
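# Minimal usage sketch for the Boruvka ``Graph`` class above: the classic
# four-node example whose minimum spanning tree has weight 4 + 5 + 10 = 19.
if __name__ == "__main__":
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.boruvka()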
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=__lowerCAmelCase ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCAmelCase : ClassVar[Features] = Features({'''image''': Image()} ) UpperCAmelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} ) UpperCAmelCase : str = "image" UpperCAmelCase : str = "labels" def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] ): if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCAmelCase__ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _A = copy.deepcopy(self ) _A = self.label_schema.copy() _A = features[self.label_column] _A = label_schema return task_template @property def lowerCAmelCase_ ( self : Optional[Any] ): return { self.image_column: "image", self.label_column: "labels", }
315
"""simple docstring""" from math import sqrt def _A ( UpperCamelCase_ : int) -> int: '''simple docstring''' __lowercase = 0 for i in range(1, int(sqrt(UpperCamelCase_) + 1)): if n % i == 0 and i != sqrt(UpperCamelCase_): total += i + n // i elif i == sqrt(UpperCamelCase_): total += i return total - n def _A ( UpperCamelCase_ : int = 10000) -> int: '''simple docstring''' __lowercase = sum( i for i in range(1, UpperCamelCase_) if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
17
0
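# Worked example for ``sum_of_divisors`` above: 220 and 284 form the classic
# amicable pair, so each maps onto the other.
if __name__ == "__main__":
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220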
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
338
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _a = _symbol_database.Default() _a = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) _a = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: _a = None _a = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" _a = 45 _a = 15_81 _a = 15_17 _a = 15_70 _a = 15_84 _a = 17_93 _a = 17_95 _a = 19_16 _a = 18_64 _a = 19_05 _a = 19_19 _a = 24_29 _a = 22_08 _a = 24_18 _a = 23_23 _a = 24_07 # @@protoc_insertion_point(module_scope)
17
0
'''simple docstring'''
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--openai_checkpoint_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the TensorFlow checkpoint path.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--openai_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
53
"""simple docstring""" import baseaa def _A ( UpperCamelCase_ : str) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode("utf-8")) def _A ( UpperCamelCase_ : bytes) -> str: '''simple docstring''' return baseaa.baadecode(UpperCamelCase_).decode("utf-8") if __name__ == "__main__": _a = 'Hello World!' _a = baseaa_encode(test) print(encoded) _a = baseaa_decode(encoded) print(decoded)
17
0
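# Round-trip check for the Base85 helpers above; any of the ``base64`` module
# codecs (b16/b32/b64/b85) satisfies the same encode/decode property.
if __name__ == "__main__":
    for sample in ("", "Hello World!", "base85 round trip"):
        assert base85_decode(base85_encode(sample)) == sample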
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
"""simple docstring""" def _A ( UpperCamelCase_ : Any) -> List[str]: '''simple docstring''' __lowercase ,__lowercase = [], [] while len(UpperCamelCase_) > 1: __lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_) start.append(UpperCamelCase_) end.append(UpperCamelCase_) collection.remove(UpperCamelCase_) collection.remove(UpperCamelCase_) end.reverse() return start + collection + end if __name__ == "__main__": _a = input('Enter numbers separated by a comma:\n').strip() _a = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
17
0
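# Behaviour sketch for the min/max "merge_sort" above: peeling the smallest and
# largest remaining elements each pass yields a fully sorted list.
if __name__ == "__main__":
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []
    assert merge_sort([7]) == [7]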
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first search over the state space tree; terminates when the whole
    # sequence has been placed.
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
13
"""simple docstring""" def _A ( UpperCamelCase_ : list[int]) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("List is empty") __lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average return sum(abs(x - average) for x in nums) / len(UpperCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
17
0
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # attach the smaller component to the larger one, then recompute roots
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # remember the cheapest edge leaving each of the two components
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
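A small usage sketch for the graph class above, with the identifiers as restored here; the minimum spanning tree of this 4-cycle has weight 1 + 2 + 3 = 6:

g = Graph(4)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(2, 3, 3)
g.add_edge(0, 3, 4)
g.boruvka()  # prints each added edge, then a total MST weight of 6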
71
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ): __lowercase = parent __lowercase = vocab_size __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = (image_size // patch_size) ** 2 __lowercase = num_patches + 1 def _lowercase ( self : int ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) return config, pixel_values, labels def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ): __lowercase = FlaxBeitModel(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ): 
__lowercase = self.type_sequence_label_size __lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self : List[Any] ): __lowercase = FlaxBeitModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["pixel_values"] self.assertListEqual(arg_names[:1], UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ): self.assertEqual(jitted_output.shape, output.shape ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ ) def _A ( ) -> str: '''simple docstring''' __lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple 
docstring""" @cached_property def _lowercase ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values # prepare bool_masked_pos __lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ ) # forward pass __lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_9_6, 8_1_9_2) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) ) @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_0_0_0) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_8_1 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 2_1_8_4_1) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
17
0
'''simple docstring''' import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _A ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : str = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : List[str] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Any = use_input_mask __UpperCAmelCase : Union[str, Any] = use_token_type_ids __UpperCAmelCase : Any = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Optional[Any] = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Any = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Union[str, Any] = type_vocab_size __UpperCAmelCase : Optional[Any] = type_sequence_label_size __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Dict = num_labels __UpperCAmelCase : Union[str, Any] = num_choices __UpperCAmelCase : Optional[int] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_input_mask: __UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : str = None if self.use_labels: __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> str: '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = DistilBertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : List[str] = model(UpperCAmelCase__ , UpperCAmelCase__ ) __UpperCAmelCase : List[str] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = DistilBertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : Dict = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.num_labels __UpperCAmelCase : Tuple = DistilBertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = DistilBertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.num_choices __UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __UpperCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[int] = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> 
Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = config_and_inputs __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _SCREAMING_SNAKE_CASE : List[Any] = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : Optional[Any] = True _SCREAMING_SNAKE_CASE : Union[str, Any] = True _SCREAMING_SNAKE_CASE : str = True def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = DistilBertModelTester(self ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , dim=37 ) def __A ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = DistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Optional[Any] = model_class(config=UpperCAmelCase__ ) __UpperCAmelCase : Any = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __UpperCAmelCase : Optional[Any] = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , """traced_model.pt""" ) ) __UpperCAmelCase : List[str] = torch.jit.load(os.path.join(UpperCAmelCase__ , """traced_model.pt""" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["""input_ids"""].to(UpperCAmelCase__ ) , inputs_dict["""attention_mask"""].to(UpperCAmelCase__ ) ) @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Tuple = DistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCAmelCase : Tuple = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) __UpperCAmelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __UpperCAmelCase : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __UpperCAmelCase : int = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
254
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase ,lowercase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase = load_tool("text-classification" ) self.tool.setup() __lowercase = load_tool("text-classification", remote=UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : str ): __lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : List[str] ): __lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : Tuple ): __lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" )
17
0
'''simple docstring'''


def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # name chosen for readability; the original identifier was lost to obfuscation
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
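Run non-interactively, the search above is a plain bisection; tracing get_avg by hand gives the midpoints shown below:

guess_the_number(1, 100, 42)
# started...
# guess the number : 42
# details : [50, 25, 37, 43, 40, 41, 42]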
265
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker _a = 'CompVis/stable-diffusion-v1-1' _a = 'CompVis/stable-diffusion-v1-2' _a = 'CompVis/stable-diffusion-v1-3' _a = 'CompVis/stable-diffusion-v1-4' class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ): super()._init_() __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ ) __lowercase = StableDiffusionPipeline( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def _lowercase ( self : List[str] ): return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )} def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): self.enable_attention_slicing(UpperCAmelCase__ ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, 
UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ): return self.pipea( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) @torch.no_grad() def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, 
UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ): __lowercase = "cuda" if torch.cuda.is_available() else "cpu" self.to(UpperCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.2 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.3 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get first result from Stable Diffusion Checkpoint v1.4 __lowercase = self.textaimg_sda_a( prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
17
0
"""simple docstring""" __lowerCamelCase = 8.3_1_4_4_5_9_8 def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if temperature < 0: raise Exception('Temperature cannot be less than 0 K' ) if molar_mass <= 0: raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example __lowerCamelCase = 3_00 __lowerCamelCase = 28 __lowerCamelCase = rms_speed_of_molecule(temperature, molar_mass) print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
221
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = "ssube/stable-diffusion-x4-upscaler-onnx" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[str]=0 ): __lowercase = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(UpperCAmelCase__ ) ) __lowercase = torch.manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _lowercase ( self : Optional[Any] ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : int ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : str ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) 
__lowercase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _lowercase ( self : Any ): __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" ) __lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = self.get_dummy_inputs() __lowercase = pipe(**UpperCAmelCase__ ).images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Dict ): __lowercase = ort.SessionOptions() __lowercase = False return options def _lowercase ( self : Dict ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _lowercase ( self : str ): __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) __lowercase = init_image.resize((1_2_8, 1_2_8) ) __lowercase = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) __lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "A fantasy landscape, trending on artstation" __lowercase = torch.manual_seed(0 ) __lowercase = pipe( prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", ) __lowercase = output.images __lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] 
) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
17
0
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit_generator is deprecated in recent TensorFlow; classifier.fit accepts the
    # same generators and arguments
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)

    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
326
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _a = datasets.utils.logging.get_logger(__name__) _a = ['names', 'prefix'] _a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] _a = ['encoding_errors', 'on_bad_lines'] _a = ['date_format'] @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : str = "," __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[Union[int, List[int], str]] = "infer" __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[Union[int, str, List[int], List[str]]] = None __UpperCAmelCase : Optional[Union[List[int], List[str]]] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[Literal["c", "python", "pyarrow"]] = None __UpperCAmelCase : Dict[Union[int, str], Callable[[Any], Any]] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : Optional[list] = None __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[Union[int, List[int]]] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Optional[Union[str, List[str]]] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : bool = True __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = "." __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : str = '"' __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : bool = True __UpperCAmelCase : bool = True __UpperCAmelCase : int = 0 __UpperCAmelCase : bool = True __UpperCAmelCase : bool = False __UpperCAmelCase : Optional[str] = None __UpperCAmelCase : int = 1_0_0_0_0 __UpperCAmelCase : Optional[datasets.Features] = None __UpperCAmelCase : Optional[str] = "strict" __UpperCAmelCase : Literal["error", "warn", "skip"] = "error" __UpperCAmelCase : Optional[str] = None def _lowercase ( self : Tuple ): if self.delimiter is not None: __lowercase = self.delimiter if self.column_names is not None: __lowercase = self.column_names @property def _lowercase ( self : Union[str, Any] ): __lowercase = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, 
"memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), UpperCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _lowerCAmelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCAmelCase : Tuple = CsvConfig def _lowercase ( self : List[str] ): return datasets.DatasetInfo(features=self.config.features ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) __lowercase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase__, (str, list, tuple) ): __lowercase = data_files if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files} )] __lowercase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [files] __lowercase = [dl_manager.iter_files(UpperCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase__, gen_kwargs={"files": files} ) ) return splits def _lowercase ( self : Dict, UpperCAmelCase__ : pa.Table ): if self.config.features is not None: __lowercase = self.config.features.arrow_schema if all(not require_storage_cast(UpperCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast __lowercase = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=UpperCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __lowercase = table_cast(UpperCAmelCase__, UpperCAmelCase__ ) return pa_table def _lowercase ( self : Optional[Any], UpperCAmelCase__ : List[str] ): __lowercase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __lowercase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCAmelCase__ ) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__ ) ): __lowercase = pd.read_csv(UpperCAmelCase__, iterator=UpperCAmelCase__, dtype=UpperCAmelCase__, **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(UpperCAmelCase__ ): __lowercase = pa.Table.from_pandas(UpperCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # 
logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__ ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase__ )}: {e}""" ) raise
17
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
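The _LazyModule pattern above defers the heavy submodule imports until an attribute is first accessed. A rough stdlib analogue of the same idea, using importlib.util.LazyLoader (a sketch, not the transformers implementation):

import importlib.util
import sys


def lazy_import(name):
    # The module body is not executed until an attribute is first accessed.
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)
    return module


json = lazy_import("json")         # nothing imported yet
print(json.dumps({"lazy": True}))  # first access triggers the real import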
26
"""simple docstring""" from scipy.stats import spearmanr import datasets _a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' _a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' _a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=False ): __lowercase = spearmanr(UpperCAmelCase__, UpperCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
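The metric's _compute is a thin wrapper over scipy; calling scipy directly with the docstring's own example reproduces the same numbers:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7
print(round(pvalue, 2))  # 0.19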
17
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Dict = LDMTextToImagePipeline UpperCAmelCase : str = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } UpperCAmelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } UpperCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase : List[str] = False def lowerCAmelCase_ ( self : str ): torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) _A = CLIPTextModel(UpperCAmelCase__ ) _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any]=0 ): if str(UpperCAmelCase__ ).startswith('mps' ): _A = torch.manual_seed(UpperCAmelCase__ ) else: _A = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : int ): _A = 'cpu' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = LDMTextToImagePipeline(**UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _A = self.get_dummy_inputs(UpperCAmelCase__ ) _A = pipe(**UpperCAmelCase__ ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _A = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : 
Dict=torch.floataa , _UpperCAmelCase : Dict=0 ): _A = torch.manual_seed(UpperCAmelCase__ ) _A = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) ) _A = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : Tuple ): _A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _A = self.get_inputs(UpperCAmelCase__ ) _A = pipe(**UpperCAmelCase__ ).images _A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _A = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _A = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=torch.floataa , _UpperCAmelCase : List[str]=0 ): _A = torch.manual_seed(UpperCAmelCase__ ) _A = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) ) _A = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : int ): _A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _A = self.get_inputs(UpperCAmelCase__ ) _A = pipe(**UpperCAmelCase__ ).images[0] _A = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) _A = np.abs(expected_image - image ).max() assert max_diff < 1E-3
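A note on the assertion style: rather than storing full reference images, these tests pin a 3x3 corner slice of the output and compare within a tolerance. A self-contained sketch of the idiom (arrays are illustrative, not real pipeline outputs):

import numpy as np

image = np.zeros((1, 16, 16, 3))       # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]   # bottom-right corner of the last channel
expected_slice = np.zeros(9)           # would be hardcoded reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3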
315
import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowercase__ : str = '''\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n''' class lowercase_ ( unittest.TestCase , UpperCamelCase_ ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]: lowerCAmelCase = load_tool('''text-question-answering''' ) self.tool.setup() lowerCAmelCase = load_tool('''text-question-answering''' , remote=UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.tool(UpperCAmelCase__ , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]: lowerCAmelCase = self.remote_tool(UpperCAmelCase__ , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]: lowerCAmelCase = self.tool(text=UpperCAmelCase__ , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE_ ( self ) ->Dict: lowerCAmelCase = self.remote_tool(text=UpperCAmelCase__ , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' )
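Outside the test harness, the same tool can be exercised directly. A hedged sketch (this downloads a checkpoint on first use; remote=True would additionally assume a reachable inference endpoint):

from transformers import load_tool

tool = load_tool("text-question-answering")
tool.setup()
answer = tool(
    "Hugging Face was founded in 2016.",
    question="When was Hugging Face founded?",
)
print(answer)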
338
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Optional[Any] ={ '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict =[ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys a__ : Optional[Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
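The optional-dependency guard used here follows one pattern: probe for the backend once, and only register the torch-backed symbols when the probe succeeds. A minimal sketch (the find_spec probe approximates what is_torch_available does; it is not the library's exact implementation):

import importlib.util

def is_torch_available() -> bool:
    # Cheap presence check; does not actually import torch.
    return importlib.util.find_spec("torch") is not None

_import_structure = {"tokenization_jukebox": ["JukeboxTokenizer"]}
if is_torch_available():
    _import_structure["modeling_jukebox"] = ["JukeboxModel", "JukeboxVQVAE"]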
53
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=UpperCAmelCase__, ) assert hasattr(self, "env" ) def _lowercase ( self : str, UpperCAmelCase__ : List[Any] ): # configuration for running training on smdistributed Model Parallel __lowercase = { "enabled": True, "processes_per_host": 8, } __lowercase = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } __lowercase = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} __lowercase = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""", instance_count=UpperCAmelCase__, instance_type=self.instance_type, debugger_hook_config=UpperCAmelCase__, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 5_0_0, }, metric_definitions=self.env.metric_definitions, distribution=UpperCAmelCase__, py_version="py36", ) def _lowercase ( self : Tuple, UpperCAmelCase__ : int ): TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any] ): # create estimator __lowercase = self.create_estimator(UpperCAmelCase__ ) # run training estimator.fit() # result dataframe __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __lowercase = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests 
result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""", "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, UpperCAmelCase__ )
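The KPI extraction above is plain dataframe filtering on the metric stream SageMaker returns. A standalone sketch with fabricated rows standing in for TrainingJobAnalytics(...).dataframe():

import pandas as pd

df = pd.DataFrame(
    {
        "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
        "value": [0.35, 1.1, 0.4],
    }
)
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])
assert all(acc >= 0.3 for acc in eval_accuracy)
assert all(loss <= 1.2 for loss in eval_loss)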
17
0
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class UpperCAmelCase : def __init__(self : List[str] , snake_case__ : Tuple , ) -> Dict: '''simple docstring''' snake_case : Dict = parent snake_case : Optional[Any] = 13 snake_case : Dict = 7 snake_case : Optional[Any] = True snake_case : Optional[Any] = True snake_case : Optional[Any] = False snake_case : int = True snake_case : int = 99 snake_case : List[Any] = 32 snake_case : str = 2 snake_case : Optional[Any] = 4 snake_case : int = 37 snake_case : Any = "gelu" snake_case : List[str] = 0.1 snake_case : Union[str, Any] = 0.1 snake_case : Any = 5_12 snake_case : List[Any] = 16 snake_case : Any = 2 snake_case : Optional[Any] = 0.02 snake_case : List[str] = 3 snake_case : Any = 4 snake_case : Dict = None def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple: '''simple docstring''' snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Optional[Any] = None if self.use_input_mask: snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case : Optional[Any] = None snake_case : Optional[int] = None snake_case : Tuple = None if self.use_labels: snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case : Tuple = ids_tensor([self.batch_size] , self.num_choices ) snake_case : Optional[int] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Optional[int] ) -> int: '''simple docstring''' snake_case : List[str] = TFDistilBertModel(config=UpperCAmelCase__ ) snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask} snake_case : Optional[int] = model(UpperCAmelCase__ ) snake_case : int = [input_ids, input_mask] snake_case : Union[str, Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> Optional[int]: '''simple docstring''' snake_case : str = TFDistilBertForMaskedLM(config=UpperCAmelCase__ ) 
snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask} snake_case : Union[str, Any] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE (self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Any ) -> int: '''simple docstring''' snake_case : Optional[Any] = TFDistilBertForQuestionAnswering(config=UpperCAmelCase__ ) snake_case : Tuple = { "input_ids": input_ids, "attention_mask": input_mask, } snake_case : Optional[int] = model(UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Any ) -> Union[str, Any]: '''simple docstring''' snake_case : Union[str, Any] = self.num_labels snake_case : List[str] = TFDistilBertForSequenceClassification(UpperCAmelCase__ ) snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask} snake_case : Dict = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Tuple ) -> str: '''simple docstring''' snake_case : Dict = self.num_choices snake_case : Tuple = TFDistilBertForMultipleChoice(UpperCAmelCase__ ) snake_case : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) snake_case : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) snake_case : Tuple = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } snake_case : Tuple = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : List[Any] ) -> int: '''simple docstring''' snake_case : Any = self.num_labels snake_case : Optional[Any] = TFDistilBertForTokenClassification(UpperCAmelCase__ ) snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask} snake_case : Dict = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> Dict: '''simple docstring''' snake_case : Optional[int] = self.prepare_config_and_inputs() ((snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case) , (snake_case)) : List[str] = config_and_inputs snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase ( A_ ,A_ ,unittest.TestCase ): A__ : int = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) A__ : Union[str, Any] = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": 
TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) A__ : str = False A__ : List[str] = False def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str: '''simple docstring''' snake_case : Dict = TFDistilBertModelTester(self ) snake_case : List[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , dim=37 ) def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> int: '''simple docstring''' snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Tuple: '''simple docstring''' snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : Dict ) -> List[str]: '''simple docstring''' snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> int: '''simple docstring''' snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]: '''simple docstring''' snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) @slow def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]: '''simple docstring''' for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): snake_case : int = TFDistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_tf class UpperCAmelCase ( unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case : List[str] = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) snake_case : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case : Optional[int] = model(UpperCAmelCase__ )[0] snake_case : Any = [1, 6, 7_68] self.assertEqual(output.shape , UpperCAmelCase__ ) snake_case : List[str] = tf.constant( [ [ [0.19261885, -0.13732955, 0.4119799], [0.22150156, -0.07422661, 0.39037204], [0.22756018, -0.0896414, 0.3701467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4 )
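The ids_tensor helper these testers rely on just draws random token ids of a given shape. A sketch of an equivalent (shapes and vocabulary size below are placeholders):

import tensorflow as tf

def ids_tensor(shape, vocab_size):
    # Random integer ids in [0, vocab_size), matching the helper's contract.
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

input_ids = ids_tensor((13, 7), vocab_size=99)  # (batch_size, seq_length)
attention_mask = tf.ones_like(input_ids)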
59
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
17
0
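The transcriber tool above decomposes into the standard three-stage Whisper round trip. A hedged end-to-end sketch (downloads the openai/whisper-base checkpoint; `audio` is assumed to be a mono float array sampled at 16 kHz):

from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

def transcribe(audio):
    # encode -> forward -> decode, mirroring the tool's three methods
    features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
    generated_ids = model.generate(inputs=features)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]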
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Trial division by odd candidates up to sqrt(number)
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value = value

    # Walk up (or down, with desc=True) until a prime is reached
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value:
        return next_prime(value + 1, **kwargs)
    return value
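A quick sanity check for the helpers above (values verified by hand; note that an input that is already prime is deliberately advanced past itself by the recursive call):

assert is_prime(13) and not is_prime(15)
assert next_prime(14) == 17             # walks up: 15, 16 composite, 17 prime
assert next_prime(14, desc=True) == 13  # walks down instead
assert next_prime(13) == 17             # already prime, so recurse from 14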
13
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str]) -> Optional[int]: '''simple docstring''' if isinstance(UpperCamelCase_, torch.Tensor): return image elif isinstance(UpperCamelCase_, PIL.Image.Image): __lowercase = [image] if isinstance(image[0], PIL.Image.Image): __lowercase = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] __lowercase = np.concatenate(UpperCamelCase_, axis=0) __lowercase = np.array(UpperCamelCase_).astype(np.floataa) / 255.0 __lowercase = image.transpose(0, 3, 1, 2) __lowercase = 2.0 * image - 1.0 __lowercase = torch.from_numpy(UpperCamelCase_) elif isinstance(image[0], torch.Tensor): __lowercase = torch.cat(UpperCamelCase_, dim=0) return image def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : str, UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[Any]=0.9_995) -> int: '''simple docstring''' if not isinstance(UpperCamelCase_, np.ndarray): __lowercase = True __lowercase = va.device __lowercase = va.cpu().numpy() __lowercase = va.cpu().numpy() __lowercase = np.sum(va * va / (np.linalg.norm(UpperCamelCase_) * np.linalg.norm(UpperCamelCase_))) if np.abs(UpperCamelCase_) > DOT_THRESHOLD: __lowercase = (1 - t) * va + t * va else: __lowercase = np.arccos(UpperCamelCase_) __lowercase = np.sin(UpperCamelCase_) __lowercase = theta_a * t __lowercase = np.sin(UpperCamelCase_) __lowercase = np.sin(theta_a - theta_t) / sin_theta_a __lowercase = sin_theta_t / sin_theta_a __lowercase = sa * va + sa * va if inputs_are_torch: __lowercase = torch.from_numpy(UpperCamelCase_).to(UpperCamelCase_) return va def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Union[str, Any]) -> int: '''simple docstring''' __lowercase = F.normalize(UpperCamelCase_, dim=-1) __lowercase = F.normalize(UpperCamelCase_, dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : str) -> Optional[int]: '''simple docstring''' for param in model.parameters(): __lowercase = value class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCAmelCase__ : CLIPFeatureExtractor, UpperCAmelCase__ : Union[str, Any]=None, UpperCAmelCase__ : List[str]=None, UpperCAmelCase__ : Any=None, ): super().__init__() self.register_modules( vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, clip_model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, coca_model=UpperCAmelCase__, coca_tokenizer=UpperCAmelCase__, coca_transform=UpperCAmelCase__, ) __lowercase = ( 
feature_extractor.size if isinstance(feature_extractor.size, UpperCAmelCase__ ) else feature_extractor.size["shortest_edge"] ) __lowercase = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std ) set_requires_grad(self.text_encoder, UpperCAmelCase__ ) set_requires_grad(self.clip_model, UpperCAmelCase__ ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __lowercase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase__ ) def _lowercase ( self : int ): self.enable_attention_slicing(UpperCAmelCase__ ) def _lowercase ( self : str ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.vae, UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : Any ): set_requires_grad(self.unet, UpperCAmelCase__ ) def _lowercase ( self : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any] ): # get the original timestep using init_timestep __lowercase = min(int(num_inference_steps * strength ), UpperCAmelCase__ ) __lowercase = max(num_inference_steps - init_timestep, 0 ) __lowercase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _lowercase ( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : int=None ): if not isinstance(UpperCAmelCase__, torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase__ )}""" ) __lowercase = image.to(device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase__ ) ] __lowercase = torch.cat(UpperCAmelCase__, dim=0 ) else: __lowercase = self.vae.encode(UpperCAmelCase__ ).latent_dist.sample(UpperCAmelCase__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 0.18_215 * init_latents __lowercase = init_latents.repeat_interleave(UpperCAmelCase__, dim=0 ) __lowercase = randn_tensor(init_latents.shape, generator=UpperCAmelCase__, device=UpperCAmelCase__, dtype=UpperCAmelCase__ ) # get latents __lowercase = self.scheduler.add_noise(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = init_latents return latents def _lowercase ( self : Optional[int], UpperCAmelCase__ : Dict ): __lowercase = self.coca_transform(UpperCAmelCase__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): __lowercase = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) ) __lowercase = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>", "" ).rstrip(" .," ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple ): __lowercase = self.feature_extractor.preprocess(UpperCAmelCase__ ) __lowercase = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, 
keepdim=UpperCAmelCase__ ) __lowercase = image_embeddings_clip.repeat_interleave(UpperCAmelCase__, dim=0 ) return image_embeddings_clip @torch.enable_grad() def _lowercase ( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], ): __lowercase = latents.detach().requires_grad_() __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): __lowercase = self.scheduler.alphas_cumprod[timestep] __lowercase = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 __lowercase = torch.sqrt(UpperCAmelCase__ ) __lowercase = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = self.scheduler.sigmas[index] __lowercase = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * sample __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase__ ) __lowercase = self.normalize(UpperCAmelCase__ ).to(latents.dtype ) __lowercase = self.clip_model.get_image_features(UpperCAmelCase__ ) __lowercase = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCAmelCase__ ) __lowercase = spherical_dist_loss(UpperCAmelCase__, UpperCAmelCase__ ).mean() * clip_guidance_scale __lowercase = -torch.autograd.grad(UpperCAmelCase__, UpperCAmelCase__ )[0] if isinstance(self.scheduler, UpperCAmelCase__ ): __lowercase = latents.detach() + grads * (sigma**2) __lowercase = noise_pred_original else: __lowercase = noise_pred_original - torch.sqrt(UpperCAmelCase__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : str, UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : Optional[int] = 5_1_2, UpperCAmelCase__ : float = 0.6, UpperCAmelCase__ : Optional[int] = 5_0, UpperCAmelCase__ : Optional[float] = 7.5, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[float] = 1_0_0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : float = 0.8, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(UpperCAmelCase__, torch.Generator ) 
and batch_size > 1: __lowercase = [generator] + [None] * (batch_size - 1) __lowercase = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] __lowercase = [x[0] for x in coca_is_none if x[1]] __lowercase = ", ".join(UpperCAmelCase__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) if style_prompt is None: if len(UpperCAmelCase__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) __lowercase = self.get_image_description(UpperCAmelCase__ ) # get prompt text embeddings for content and style __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] __lowercase = self.tokenizer( UpperCAmelCase__, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=UpperCAmelCase__, return_tensors="pt", ) __lowercase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # duplicate text embeddings for each generation per prompt __lowercase = text_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # set timesteps __lowercase = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) __lowercase = {} if accepts_offset: __lowercase = 1 self.scheduler.set_timesteps(UpperCAmelCase__, **UpperCAmelCase__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) __lowercase ,__lowercase = self.get_timesteps(UpperCAmelCase__, UpperCAmelCase__, self.device ) __lowercase = timesteps[:1].repeat(UpperCAmelCase__ ) # Preprocess image __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = preprocess(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.prepare_latents( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, text_embeddings.dtype, self.device, UpperCAmelCase__ ) __lowercase = slerp(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) if clip_guidance_scale > 0: __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = self.get_clip_image_embeddings(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = slerp( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__lowercase = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __lowercase = content_text_input.input_ids.shape[-1] __lowercase = self.tokenizer([""], padding="max_length", max_length=UpperCAmelCase__, return_tensors="pt" ) __lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt __lowercase = uncond_embeddings.repeat_interleave(UpperCAmelCase__, dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowercase = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __lowercase = (batch_size, self.unet.config.in_channels, height // 8, width // 8) __lowercase = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device="cpu", dtype=UpperCAmelCase__ ).to( self.device ) else: __lowercase = torch.randn(UpperCAmelCase__, generator=UpperCAmelCase__, device=self.device, dtype=UpperCAmelCase__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __lowercase = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __lowercase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __lowercase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __lowercase = {} if accepts_eta: __lowercase = eta # check if the scheduler accepts generator __lowercase = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: __lowercase = generator with self.progress_bar(total=UpperCAmelCase__ ): for i, t in enumerate(UpperCAmelCase__ ): # expand the latents if we are doing classifier free guidance __lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase = self.scheduler.scale_model_input(UpperCAmelCase__, UpperCAmelCase__ ) # predict the noise residual __lowercase = self.unet(UpperCAmelCase__, UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__ ).sample # perform classifier free guidance if do_classifier_free_guidance: __lowercase ,__lowercase = noise_pred.chunk(2 ) __lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: __lowercase = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) __lowercase ,__lowercase = self.cond_fn( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) # compute the previous noisy sample x_t -> x_t-1 __lowercase = self.scheduler.step(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor __lowercase = 1 / 0.18_215 * latents __lowercase = self.vae.decode(UpperCAmelCase__ ).sample __lowercase = (image / 2 + 0.5).clamp(0, 1 ) __lowercase = image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase__, nsfw_content_detected=UpperCAmelCase__ )
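The slerp helper defined at the top of this pipeline is easiest to validate in isolation: at t=0 it returns the first vector, at t=1 the second, and intermediate points stay on the arc rather than the straight chord. A standalone NumPy check:

import numpy as np

def slerp_np(t, va, vb):
    theta = np.arccos(np.dot(va, vb) / (np.linalg.norm(va) * np.linalg.norm(vb)))
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)

va, vb = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp_np(0.5, va, vb)
assert np.allclose(mid, [np.sqrt(0.5), np.sqrt(0.5)])  # stays on the unit circle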
17
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class __A : """simple docstring""" UpperCamelCase__ : List[Any] =PegasusConfig UpperCamelCase__ : Optional[int] ={} UpperCamelCase__ : Any ="gelu" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=40 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , ): """simple docstring""" __UpperCamelCase : str =parent __UpperCamelCase : str =batch_size __UpperCamelCase : Any =seq_length __UpperCamelCase : Optional[Any] =is_training __UpperCamelCase : Dict =use_labels __UpperCamelCase : List[Any] =vocab_size __UpperCamelCase : str =hidden_size __UpperCamelCase : List[str] =num_hidden_layers __UpperCamelCase : str =num_attention_heads __UpperCamelCase : Optional[int] =intermediate_size __UpperCamelCase : Optional[Any] =hidden_dropout_prob __UpperCamelCase : List[Any] =attention_probs_dropout_prob __UpperCamelCase : Optional[Any] =max_position_embeddings __UpperCamelCase : Union[str, Any] =eos_token_id __UpperCamelCase : List[str] =pad_token_id __UpperCamelCase : str =bos_token_id def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase : Any =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase : int =tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : Optional[int] =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase : List[str] =prepare_pegasus_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return config, inputs_dict def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Tuple =TFPegasusModel(config=UpperCAmelCase__ ).get_decoder() __UpperCamelCase : str =inputs_dict['input_ids'] __UpperCamelCase : str =input_ids[:1, :] __UpperCamelCase : Tuple =inputs_dict['attention_mask'][:1, :] __UpperCamelCase : List[Any] =inputs_dict['head_mask'] __UpperCamelCase : Optional[Any] =1 # first forward pass __UpperCamelCase : Union[str, Any] 
=model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __UpperCamelCase , __UpperCamelCase : Union[str, Any] =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCamelCase : Any =ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase : List[str] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __UpperCamelCase : Dict =tf.concat([input_ids, next_tokens] , axis=-1 ) __UpperCamelCase : Any =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __UpperCamelCase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __UpperCamelCase : List[Any] =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __UpperCamelCase : Optional[int] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __UpperCamelCase : int =output_from_no_past[:, -3:, random_slice_idx] __UpperCamelCase : Optional[Any] =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 ) def A ( a_ ,a_ ,a_ ,a_=None ,a_=None ,a_=None ,a_=None ,a_=None ,) -> Tuple: if attention_mask is None: __UpperCamelCase : Dict =tf.cast(tf.math.not_equal(UpperCamelCase_ ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __UpperCamelCase : Optional[int] =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __UpperCamelCase : Union[str, Any] =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase : Any =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase : Optional[int] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __A ( a , a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Dict =(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCamelCase__ : str =(TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ : Optional[int] =( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ : List[Any] =True UpperCamelCase__ : Union[str, Any] =False UpperCamelCase__ : Dict =False def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =TFPegasusModelTester(self ) __UpperCamelCase : Optional[int] =ConfigTester(self , config_class=UpperCAmelCase__ ) def __lowercase ( self ): """simple docstring""" self.config_tester.run_common_tests() def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ ) 
@require_sentencepiece @require_tokenizers @require_tf class __A ( unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Tuple =[ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" ", ] UpperCamelCase__ : List[str] =[ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCamelCase__ : Dict ="google/pegasus-xsum" @cached_property def __lowercase ( self ): """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __lowercase ( self , **lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.translate_src_text(**UpperCAmelCase__ ) assert self.expected_text == generated_words def __lowercase ( self , **lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Union[str, Any] =self.tokenizer(self.src_text , **UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='tf' ) __UpperCamelCase : Optional[Any] =self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCAmelCase__ , ) __UpperCamelCase : Tuple =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCAmelCase__ ) return generated_words @slow def __lowercase ( self ): """simple docstring""" self._assert_generated_batch_equal_expected()
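The integration test above reduces to a short standalone summarization sketch. This is a hedged illustration, not part of the original file: it assumes the obfuscated TFAutoModelForSeqaSeqLM resolves to the real TFAutoModelForSeq2SeqLM class and uses the google/pegasus-xsum checkpoint named in the test.

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

article = "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions."
inputs = tokenizer([article], padding=True, return_tensors="tf")
# beam search with num_beams=2, mirroring the generate() call in the test
summary_ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))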
code_codestyle: 71
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _lowerCAmelCase : """simple docstring""" __UpperCAmelCase : Tuple = XGLMConfig __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Union[str, Any] = "gelu" def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_labels __lowercase = vocab_size __lowercase = d_model __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = ffn_dim __lowercase = activation_function __lowercase = activation_dropout __lowercase = attention_dropout __lowercase = max_position_embeddings __lowercase = initializer_range __lowercase = None __lowercase = 0 __lowercase = 2 __lowercase = 1 def _lowercase ( self : Union[str, Any] ): return XGLMConfig.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Tuple ): __lowercase = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = self.get_config() __lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowercase ( self : List[Any] ): return XGLMConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, ) def _lowercase ( self : Dict ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = { "input_ids": input_ids, "head_mask": head_mask, } return config, inputs_dict @require_tf class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if 
is_tf_available() else () __UpperCAmelCase : Any = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def _lowercase ( self : Optional[Any] ): __lowercase = TFXGLMModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @slow def _lowercase ( self : List[str] ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." ) def _lowercase ( self : int ): super().test_resize_token_embeddings() @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[Any] ): __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) tf.random.set_seed(0 ) __lowercase = tokenizer("Today is a nice day and", return_tensors="tf" ) __lowercase = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(":/CPU:0" ): __lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] ) __lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = ( "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" ) self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ ) @slow def _lowercase ( self : Dict ): __lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" ) __lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) __lowercase = "left" # use different length sentences to test batching __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. 
When", "Hello, my dog is a little", ] __lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ ) __lowercase = inputs["input_ids"] __lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids __lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 ) __lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ ) __lowercase = [ "This is an extremelly long sentence that only exists to test the ability of the model to cope with " "left-padding, such as in batched generation. The output for the sequence below should be the same " "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be " "a single", "Hello, my dog is a little bit of a shy one, but he is very friendly", ] self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
style_context_codestyle: 17
label: 0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json''' # See all FNet models at https://huggingface.co/models?filter=fnet } class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = "fnet" def __init__( self , __UpperCAmelCase=32_000 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=4 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=False , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __UpperCAmelCase : List[str] = vocab_size __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : str = hidden_size __UpperCAmelCase : Tuple = num_hidden_layers __UpperCAmelCase : Optional[int] = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : Any = type_vocab_size __UpperCAmelCase : Optional[Any] = layer_norm_eps __UpperCAmelCase : Any = use_tpu_fourier_optimizations __UpperCAmelCase : Optional[int] = tpu_short_seq_length
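A short sketch instantiating the configuration above; FNetConfig is the public class, and the defaults in the signature reproduce the google/fnet-base sizes:

from transformers import FNetConfig

config = FNetConfig()  # vocab_size=32000, hidden_size=768, num_hidden_layers=12, ...
tiny = FNetConfig(hidden_size=128, num_hidden_layers=2, intermediate_size=512)
print(config.hidden_size, tiny.hidden_size)  # 768 128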
code_codestyle: 254
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder _a = '__DUMMY_TRANSFORMERS_USER__' _a = 'Dummy User' _a = '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' _a = 'https://hub-ci.huggingface.co' _a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' _a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' _a = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _A ( UpperCamelCase_ : List[Any]) -> Tuple: '''simple docstring''' monkeypatch.setattr( "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : int) -> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_) monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]: '''simple docstring''' HfFolder.save_token(UpperCamelCase_) yield HfFolder.delete_token() @pytest.fixture(scope="session") def _A ( ) -> List[Any]: '''simple docstring''' return HfApi(endpoint=UpperCamelCase_) @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi) -> List[Any]: '''simple docstring''' __lowercase = HfFolder.get_token() HfFolder.save_token(UpperCamelCase_) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCamelCase_) @pytest.fixture def _A ( UpperCamelCase_ : Dict) -> int: '''simple docstring''' def _cleanup_repo(UpperCamelCase_ : Optional[int]): hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") return _cleanup_repo @pytest.fixture def _A ( UpperCamelCase_ : str) -> Any: '''simple docstring''' @contextmanager def _temporary_repo(UpperCamelCase_ : Any): try: yield repo_id finally: cleanup_repo(UpperCamelCase_) return _temporary_repo @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int: '''simple docstring''' __lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", 
) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="session") def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]: '''simple docstring''' __lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}""" __lowercase = F"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_) hf_api.upload_file( token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", ) yield repo_id try: hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset") except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
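A sketch of how a test would consume these fixtures. The fixture names below (temporary_repo, hf_api, hf_token) are hypothetical, inferred from the fixture bodies above, since the public names are obfuscated in this dump:

def test_with_throwaway_repo(temporary_repo, hf_api, hf_token):
    # temporary_repo yields the repo_id and guarantees deletion on exit
    with temporary_repo(f"{CI_HUB_USER}/scratch-repo") as repo_id:
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
        # ... exercise code under test against repo_id ...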
style_context_codestyle: 17
label: 0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : Any = logging.get_logger(__name__) def __lowerCamelCase ( _lowercase ) -> Optional[Any]: UpperCAmelCase : Dict = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: UpperCAmelCase : str = [1_4_4, 1_9_2, 2_4_0] UpperCAmelCase : Optional[Any] = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0] elif "mobilevit_xs" in mobilevit_name: UpperCAmelCase : Tuple = [9_6, 1_2_0, 1_4_4] UpperCAmelCase : Optional[Any] = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4] elif "mobilevit_xxs" in mobilevit_name: UpperCAmelCase : Tuple = [6_4, 8_0, 9_6] UpperCAmelCase : List[Any] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0] UpperCAmelCase : Optional[Any] = 0.05 UpperCAmelCase : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): UpperCAmelCase : Optional[int] = 5_1_2 UpperCAmelCase : str = 1_6 UpperCAmelCase : Union[str, Any] = 2_1 UpperCAmelCase : List[str] = """pascal-voc-id2label.json""" else: UpperCAmelCase : Optional[int] = 1_0_0_0 UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : int = """huggingface/label-files""" UpperCAmelCase : Any = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Any = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCAmelCase : List[str] = idalabel UpperCAmelCase : str = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[Any]: for i in range(1 , 6 ): if F'''layer_{i}.''' in name: UpperCAmelCase : List[str] = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: UpperCAmelCase : Tuple = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: UpperCAmelCase : Optional[Any] = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: UpperCAmelCase : Tuple = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: UpperCAmelCase : List[Any] = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: UpperCAmelCase : str = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: UpperCAmelCase : Tuple = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: UpperCAmelCase : Union[str, Any] = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: UpperCAmelCase : List[str] = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." 
in name: UpperCAmelCase : Optional[int] = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: UpperCAmelCase : List[Any] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: UpperCAmelCase : Optional[Any] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' ) if "expand_1x1" in name: UpperCAmelCase : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: UpperCAmelCase : Any = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: UpperCAmelCase : Optional[int] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F'''.global_rep.{i}.weight''' in name: UpperCAmelCase : str = name.replace(F'''.global_rep.{i}.weight''' , """.layernorm.weight""" ) if F'''.global_rep.{i}.bias''' in name: UpperCAmelCase : List[str] = name.replace(F'''.global_rep.{i}.bias''' , """.layernorm.bias""" ) if ".global_rep." in name: UpperCAmelCase : Union[str, Any] = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: UpperCAmelCase : Any = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: UpperCAmelCase : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: UpperCAmelCase : str = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: UpperCAmelCase : List[str] = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: UpperCAmelCase : str = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: UpperCAmelCase : Tuple = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: UpperCAmelCase : Optional[Any] = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: UpperCAmelCase : str = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: UpperCAmelCase : int = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: UpperCAmelCase : Union[str, Any] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: UpperCAmelCase : str = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." 
not in name): UpperCAmelCase : Optional[Any] = """mobilevit.""" + name return name def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False ) -> Tuple: if base_model: UpperCAmelCase : List[str] = """""" else: UpperCAmelCase : List[Any] = """mobilevit.""" for key in orig_state_dict.copy().keys(): UpperCAmelCase : Optional[int] = orig_state_dict.pop(UpperCamelCase_ ) if key[:8] == "encoder.": UpperCAmelCase : Union[str, Any] = key[8:] if "qkv" in key: UpperCAmelCase : str = key.split(""".""" ) UpperCAmelCase : Optional[int] = int(key_split[0][6:] ) - 1 UpperCAmelCase : List[Any] = int(key_split[3] ) UpperCAmelCase : List[Any] = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' ) UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size UpperCAmelCase : List[str] = ( F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: UpperCAmelCase : Dict = val[:dim, :] UpperCAmelCase : str = val[dim : dim * 2, :] UpperCAmelCase : List[str] = val[-dim:, :] else: UpperCAmelCase : Optional[int] = val[:dim] UpperCAmelCase : Tuple = val[dim : dim * 2] UpperCAmelCase : Tuple = val[-dim:] else: UpperCAmelCase : Tuple = val return orig_state_dict def __lowerCamelCase ( ) -> Tuple: UpperCAmelCase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Any = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> str: UpperCAmelCase : str = get_mobilevit_config(UpperCamelCase_ ) # load original state_dict UpperCAmelCase : int = torch.load(UpperCamelCase_ , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): UpperCAmelCase : int = MobileViTForSemanticSegmentation(UpperCamelCase_ ).eval() else: UpperCAmelCase : int = MobileViTForImageClassification(UpperCamelCase_ ).eval() UpperCAmelCase : int = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 ) UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) UpperCAmelCase : Tuple = model(**UpperCamelCase_ ) UpperCAmelCase : Optional[Any] = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 2_1, 3_2, 3_2) if mobilevit_name == "deeplabv3_mobilevit_s": UpperCAmelCase : str = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": UpperCAmelCase : int = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": UpperCAmelCase : int = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, 
-10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 ) else: assert logits.shape == (1, 1_0_0_0) if mobilevit_name == "mobilevit_s": UpperCAmelCase : Optional[int] = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": UpperCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": UpperCAmelCase : str = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: UpperCAmelCase : Optional[Any] = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) UpperCAmelCase : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(UpperCamelCase_ , organization="""apple""" ) model.push_to_hub(UpperCamelCase_ , organization="""apple""" ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',""" """ \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) a : Optional[int] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
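Once converted (or using the checkpoints this script pushes to the apple organization), the model goes through the standard image-classification API; a minimal sketch, using the same COCO test image as prepare_img():

import requests
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])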
code_codestyle: 265
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _a = logging.get_logger(__name__) _a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : int = "time_series_transformer" __UpperCAmelCase : Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self : int, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : str = "student_t", UpperCAmelCase__ : str = "nll", UpperCAmelCase__ : int = 1, UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7], UpperCAmelCase__ : Optional[Union[str, bool]] = "mean", UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : int = 0, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 3_2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : int = 2, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : str = "gelu", UpperCAmelCase__ : int = 6_4, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : float = 0.1, UpperCAmelCase__ : int = 1_0_0, UpperCAmelCase__ : float = 0.02, UpperCAmelCase__ : Any=True, **UpperCAmelCase__ : List[str], ): # time series specific configuration __lowercase = prediction_length __lowercase = context_length or prediction_length __lowercase = distribution_output __lowercase = loss __lowercase = input_size __lowercase = num_time_features __lowercase = lags_sequence __lowercase = scaling __lowercase = num_dynamic_real_features __lowercase = num_static_real_features __lowercase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) __lowercase = cardinality else: __lowercase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) __lowercase = embedding_dimension else: __lowercase = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality] __lowercase = num_parallel_samples # Transformer architecture configuration __lowercase = input_size * len(UpperCAmelCase__ ) + self._number_of_features __lowercase = d_model __lowercase = encoder_attention_heads __lowercase = decoder_attention_heads __lowercase = encoder_ffn_dim __lowercase = decoder_ffn_dim __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = encoder_layerdrop __lowercase = decoder_layerdrop __lowercase = activation_function __lowercase = init_std __lowercase = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__, **UpperCAmelCase__ ) @property def _lowercase ( self : Optional[Any] ): 
return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
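A worked example of the feature count above: with the defaults (input_size=1, lags_sequence of length 7, no dynamic or static real features) plus two time features and one categorical feature embedded in 4 dimensions, _number_of_features = 4 + 0 + 2 + 0 + 1 * 2 = 8, so the encoder input width becomes 1 * 7 + 8 = 15:

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[366],
    embedding_dimension=[4],
)
print(config.feature_size)  # 15 = 1 * len(lags_sequence) + (4 + 2 + 2)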
style_context_codestyle: 17
label: 0
"""simple docstring""" from __future__ import annotations import time __lowerCamelCase = list[tuple[int, int]] __lowerCamelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __lowerCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = parent class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,UpperCAmelCase__ ) A__ = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,UpperCAmelCase__ ) A__ = [self.start] A__ = False def snake_case__ ( self ) -> int: while self.node_queue: A__ = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: A__ = True return self.retrace_path(UpperCAmelCase__ ) A__ = self.get_successors(UpperCAmelCase__ ) for node in successors: self.node_queue.append(UpperCAmelCase__ ) if not self.reached: return [self.start.pos] return None def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]: A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(UpperCAmelCase__ ,UpperCAmelCase__ ,self.target.pos_y ,self.target.pos_x ,UpperCAmelCase__ ) ) return successors def snake_case__ ( self ,__UpperCAmelCase ) -> str: A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: A__ = BreadthFirstSearch(UpperCAmelCase__ ,UpperCAmelCase__ ) A__ = BreadthFirstSearch(UpperCAmelCase__ ,UpperCAmelCase__ ) A__ = False def snake_case__ ( self ) -> Tuple: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: A__ = self.fwd_bfs.node_queue.pop(0 ) A__ = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: A__ = True return self.retrace_bidirectional_path( UpperCAmelCase__ ,UpperCAmelCase__ ) A__ = current_bwd_node A__ = current_fwd_node A__ = { self.fwd_bfs: self.fwd_bfs.get_successors(UpperCAmelCase__ ), self.bwd_bfs: self.bwd_bfs.get_successors(UpperCAmelCase__ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(UpperCAmelCase__ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = self.fwd_bfs.retrace_path(UpperCAmelCase__ ) A__ = self.bwd_bfs.retrace_path(UpperCAmelCase__ ) bwd_path.pop() bwd_path.reverse() A__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() __lowerCamelCase = (0, 0) __lowerCamelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __lowerCamelCase = time.time() __lowerCamelCase = BreadthFirstSearch(init, goal) __lowerCamelCase = bfs.search() __lowerCamelCase = time.time() - start_bfs_time print("Unidirectional BFS computation time : ", bfs_time) __lowerCamelCase = time.time() __lowerCamelCase = BidirectionalBreadthFirstSearch(init, goal) 
__lowerCamelCase = bd_bfs.search() __lowerCamelCase = time.time() - start_bd_bfs_time print("Bidirectional BFS computation time : ", bd_bfs_time)
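A minimal driver for the searches above, assuming the three obfuscated classes de-obfuscate to Node, BreadthFirstSearch and BidirectionalBreadthFirstSearch and that the module-level grid keeps its name (these are the names the __main__ block already references):

start, goal = (0, 0), (len(grid) - 1, len(grid[0]) - 1)

path = BreadthFirstSearch(start, goal).search()                  # unidirectional
bd_path = BidirectionalBreadthFirstSearch(start, goal).search()  # searches from both ends

# both return a list of (y, x) tuples from start to goal
assert path[0] == start and path[-1] == goal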
code_codestyle: 221
"""simple docstring""" import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ): pass def _A ( UpperCamelCase_ : Union[str, Any]) -> Any: '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. _a = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ): __lowercase = pipeline( "document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ ) __lowercase = INVOICE_URL __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) __lowercase = "What is the placebo?" __lowercase = [ { "image": load_image(UpperCAmelCase__ ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 ) self.assertEqual( UpperCAmelCase__, [ [ {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, {"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )}, ] ] * 3, ) @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" ) __lowercase = INVOICE_URL __lowercase = "How many cats are there?" __lowercase = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0}, ] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) # We can optionnally pass directly the words and bounding boxes __lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png" __lowercase = [] __lowercase = [] __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 ) self.assertEqual(UpperCAmelCase__, [] ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : List[str] ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ] * 2, ) @slow @require_torch @require_detectrona @require_pytesseract def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3}, {"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Optional[Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3}, ], ) @slow @require_torch @require_pytesseract @require_vision def _lowercase ( self : Union[str, Any] ): __lowercase = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ ) __lowercase = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" __lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) __lowercase = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ] ] * 2, ) __lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) ) # This model should also work if `image` is set to None __lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__, decimals=4 ), [ {"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6}, {"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6}, ], ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", ) __lowercase = INVOICE_URL __lowercase = "What is the invoice number?" 
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def _lowercase ( self : List[Any] ): pass
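The same pipeline outside the test harness; a minimal sketch using the invoice image and layoutlm checkpoint from the tests above (OCR requires pytesseract, as the decorators indicate):

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
answers = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
print(answers)  # e.g. [{'score': ..., 'answer': 'us-001', 'start': 16, 'end': 16}, ...]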
style_context_codestyle: 17
label: 0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _UpperCamelCase = logging.get_logger(__name__) class _lowerCamelCase ( a ): """simple docstring""" UpperCAmelCase_ : Tuple =["pixel_values"] def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BILINEAR , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> Dict: '''simple docstring''' super().__init__(**UpperCAmelCase__ ) __snake_case : Tuple = size if size is not None else {"shortest_edge": 256} __snake_case : List[Any] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) __snake_case : Tuple = crop_size if crop_size is not None else {"height": 224, "width": 224} __snake_case : Optional[Any] = get_size_dict(UpperCAmelCase__ , param_name="crop_size" ) __snake_case : Any = do_resize __snake_case : int = size __snake_case : int = resample __snake_case : Any = do_center_crop __snake_case : Any = crop_size __snake_case : Optional[int] = do_rescale __snake_case : Tuple = rescale_factor __snake_case : str = do_normalize __snake_case : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> str: '''simple docstring''' __snake_case : int = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __snake_case : str = get_resize_output_image_size(UpperCAmelCase__ , size=size["shortest_edge"] , default_to_square=UpperCAmelCase__ ) return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> str: '''simple docstring''' __snake_case : List[str] = get_size_dict(UpperCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(UpperCAmelCase__ , size=(size["height"], size["width"]) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ) -> Any: '''simple docstring''' return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Any: '''simple docstring''' return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize __snake_case : List[Any] = size if size is not None else self.size __snake_case : str = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ ) __snake_case : Tuple = resample if resample is not None else self.resample __snake_case : int = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : str = crop_size if crop_size is not None else self.crop_size __snake_case : List[str] = get_size_dict(UpperCAmelCase__ , param_name="crop_size" ) __snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : Optional[Any] = image_mean if image_mean is not None else self.image_mean __snake_case : Any = image_std if image_std is not None else self.image_std __snake_case : Optional[int] = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__snake_case : int = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: __snake_case : Tuple = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images] if do_center_crop: __snake_case : Union[str, Any] = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images] if do_rescale: __snake_case : Tuple = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_normalize: __snake_case : int = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images] __snake_case : Union[str, Any] = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] __snake_case : Tuple = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[Any]: '''simple docstring''' __snake_case : int = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(UpperCAmelCase__ ): __snake_case : Dict = target_sizes.numpy() __snake_case : Tuple = [] for idx in range(len(UpperCAmelCase__ ) ): __snake_case : Tuple = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase__ ) __snake_case : Any = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase__ ) else: __snake_case : Tuple = logits.argmax(dim=1 ) __snake_case : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
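A preprocessing sketch for the class above. Its public name is obfuscated in this dump, so SomeImageProcessor below is a placeholder for whatever the class is exported as; the defaults it encodes (shortest-edge-256 resize, 224x224 center crop, ImageNet normalization) are taken from the __init__ above:

import numpy as np

processor = SomeImageProcessor()  # placeholder name for the obfuscated class
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) after resize + center crop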
code_codestyle: 326
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _a = 2 class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict, *, # begin keyword-only arguments UpperCAmelCase__ : str="<s>", UpperCAmelCase__ : Tuple="<pad>", UpperCAmelCase__ : str="</s>", UpperCAmelCase__ : Optional[Any]="<unk>", UpperCAmelCase__ : List[Any]=None, ): __lowercase ,__lowercase ,__lowercase ,__lowercase = bos, unk, pad, eos __lowercase = [] __lowercase = [] __lowercase = {} __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) __lowercase = self.add_symbol(UpperCAmelCase__ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(UpperCAmelCase__ ) __lowercase = len(self.symbols ) def __eq__( self : List[str], UpperCAmelCase__ : Dict ): return self.indices == other.indices def __getitem__( self : Optional[int], UpperCAmelCase__ : List[str] ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : str ): return len(self.symbols ) def __contains__( self : Any, UpperCAmelCase__ : Optional[Any] ): return sym in self.indices @classmethod def _lowercase ( cls : List[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = cls() d.add_from_file(UpperCAmelCase__ ) return d def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any]=1, UpperCAmelCase__ : str=False ): if word in self.indices and not overwrite: __lowercase = self.indices[word] __lowercase = self.count[idx] + n return idx else: __lowercase = len(self.symbols ) __lowercase = idx self.symbols.append(UpperCAmelCase__ ) self.count.append(UpperCAmelCase__ ) return idx def _lowercase ( self : Any, UpperCAmelCase__ : str ): return 0 def _lowercase ( self : Tuple, UpperCAmelCase__ : List[Any] ): if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): try: with open(UpperCAmelCase__, "r", encoding="utf-8" ) as fd: self.add_from_file(UpperCAmelCase__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(UpperCAmelCase__ ) ) return __lowercase = f.readlines() __lowercase = self._load_meta(UpperCAmelCase__ ) for line in lines[indices_start_line:]: try: __lowercase ,__lowercase = line.rstrip().rsplit(" ", 1 ) if field == "#fairseq:overwrite": __lowercase = True __lowercase ,__lowercase = line.rsplit(" ", 1 ) else: __lowercase = False __lowercase = int(UpperCAmelCase__ ) __lowercase = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(UpperCAmelCase__ ) ) self.add_symbol(UpperCAmelCase__, n=UpperCAmelCase__, overwrite=UpperCAmelCase__ ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _A ( UpperCamelCase_ : int) -> str: '''simple docstring''' __lowercase = dict((re.sub(r"@@$", "", UpperCamelCase_), v) if k.endswith("@@") else (re.sub(r"$", "</w>", UpperCamelCase_), v) for k, v in d.items()) __lowercase = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] __lowercase = d[k] # restore return da def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str) -> List[Any]: '''simple docstring''' if not os.path.exists(UpperCamelCase_): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""") os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_) print(F"""Writing results to {pytorch_dump_folder_path}""") # handle various types of models __lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""") __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = chkpt["cfg"]["model"] # dicts __lowercase = os.path.join(UpperCamelCase_, "dict.txt") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {dict_file} does not exist!""") __lowercase = Dictionary.load(UpperCamelCase_) __lowercase = rewrite_dict_keys(src_dict.indices) __lowercase = len(UpperCamelCase_) __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"]) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # merges_file (bpecodes) __lowercase = os.path.join(UpperCamelCase_, "bpecodes") if not os.path.isfile(UpperCamelCase_): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""") __lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"]) shutil.copyfile(UpperCamelCase_, UpperCamelCase_) # model config __lowercase = os.path.join(UpperCamelCase_, "config.json") __lowercase = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # tokenizer config __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) __lowercase = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": 
None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"""Generating {biogpt_tokenizer_config_file}""") with open(UpperCamelCase_, "w", encoding="utf-8") as f: f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_)) # model __lowercase = chkpt["model"] # remove unneeded keys __lowercase = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase_, UpperCamelCase_) __lowercase = list(model_state_dict.keys()) for layer_name in layer_names: if layer_name.endswith("output_projection.weight"): __lowercase = model_state_dict.pop(UpperCamelCase_) else: __lowercase = model_state_dict.pop(UpperCamelCase_) __lowercase = BioGptConfig.from_pretrained(UpperCamelCase_) __lowercase = BioGptForCausalLM(UpperCamelCase_) # check that it loads ok model_new.load_state_dict(UpperCamelCase_) # save __lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_) print(F"""Generating {pytorch_weights_dump_path}""") torch.save(UpperCamelCase_, UpperCamelCase_) print("Conversion is done!") if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _a = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
17
0
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
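# --- Usage sketch (added; not part of the original tests) --------------------
# The round-trip the tests above exercise, condensed into a standalone check;
# the temporary-directory handling is an assumption for illustration.
if __name__ == "__main__":
    import tempfile

    info = DatasetInfo(description="foo", dataset_size=42)
    with tempfile.TemporaryDirectory() as tmp_dir:
        info.write_to_directory(tmp_dir)
        assert DatasetInfo.from_directory(tmp_dir) == info
    print("DatasetInfo round-trip OK")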
26
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : List[Any] ) -> str: '''simple docstring''' _A = StableDiffusionPipeline.from_pretrained(UpperCamelCase_ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors _A = load_file(UpperCamelCase_ ) _A = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: _A = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) _A = pipeline.text_encoder else: _A = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) _A = pipeline.unet # find the target layer _A = layer_infos.pop(0 ) while len(UpperCamelCase_ ) > -1: try: _A = curr_layer.__getattr__(UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: _A = layer_infos.pop(0 ) elif len(UpperCamelCase_ ) == 0: break except Exception: if len(UpperCamelCase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: _A = layer_infos.pop(0 ) _A = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' , 'lora_up' ) ) pair_keys.append(UpperCamelCase_ ) else: pair_keys.append(UpperCamelCase_ ) pair_keys.append(key.replace('lora_up' , 'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: _A = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) _A = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_ , UpperCamelCase_ ).unsqueeze(2 ).unsqueeze(3 ) else: _A = state_dict[pair_keys[0]].to(torch.floataa ) _A = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCamelCase_ , UpperCamelCase_ ) # update visited list for item in pair_keys: visited.append(UpperCamelCase_ ) return pipeline if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument( '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.''' ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors''' ) parser.add_argument( '''--lora_prefix_text_encoder''', default='''lora_te''', type=str, help='''The prefix of text encoder weight in safetensors''', ) parser.add_argument('''--alpha''', default=0.7_5, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''') parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''' ) parser.add_argument('''--device''', type=str, help='''Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)''') a = parser.parse_args() a = args.base_model_path a = args.checkpoint_path a = args.dump_path a = args.lora_prefix_unet a = args.lora_prefix_text_encoder a = args.alpha a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) a = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
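# --- Usage sketch (added; not part of the original script) -------------------
# Typical invocation of the merger above; the model id, LoRA file name, and
# output path are assumptions for illustration.
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./my_lora.safetensors \
#       --dump_path ./merged-pipeline \
#       --alpha 0.75 --device cpu
#
# The resulting directory loads as an ordinary diffusers pipeline with the
# LoRA delta already baked into the UNet and text-encoder weights.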
315
"""simple docstring""" from math import sqrt def _A ( UpperCamelCase_ : int) -> int: '''simple docstring''' __lowercase = 0 for i in range(1, int(sqrt(UpperCamelCase_) + 1)): if n % i == 0 and i != sqrt(UpperCamelCase_): total += i + n // i elif i == sqrt(UpperCamelCase_): total += i return total - n def _A ( UpperCamelCase_ : int = 10000) -> int: '''simple docstring''' __lowercase = sum( i for i in range(1, UpperCamelCase_) if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
17
0