| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 – 54.1k) | int64 (0 – 699) | string (length 111 – 35.6k) | int64 (0 – 699) | int64 (0 – 1) |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
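# Reader class (appears to be an obfuscated copy of `datasets`' SqlDatasetReader):
# wraps the Sql packaged-module builder and materializes a Dataset from a SQL
# query or table via download_and_prepare() + as_dataset().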
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
super().__init__(features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = Sql(
cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , sql=__SCREAMING_SNAKE_CASE , con=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self ):
snake_case__ : Any = None
snake_case__ : int = None
snake_case__ : Dict = None
snake_case__ : List[Any] = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , )
# Build dataset for splits
snake_case__ : List[str] = self.builder.as_dataset(
split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
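# Writer class (appears to be SqlDatasetWriter): streams a Dataset into a SQL
# table in batches, optionally in parallel with multiprocessing.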
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
snake_case__ : Dict = dataset
snake_case__ : Any = name
snake_case__ : Tuple = con
snake_case__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case__ : Optional[int] = num_proc
snake_case__ : str = to_sql_kwargs
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.to_sql_kwargs.pop("""sql""" , __SCREAMING_SNAKE_CASE )
snake_case__ : int = self.to_sql_kwargs.pop("""con""" , __SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.to_sql_kwargs.pop("""index""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self._write(index=__SCREAMING_SNAKE_CASE , **self.to_sql_kwargs )
return written
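# Write one batch: slice the Arrow table at [offset, offset + batch_size),
# convert it to a pandas DataFrame, and append it via DataFrame.to_sql.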
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ , snake_case__ , snake_case__ : List[Any] = args
snake_case__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
snake_case__ : List[str] = query_table(
table=self.dataset.data , key=slice(__SCREAMING_SNAKE_CASE , offset + self.batch_size ) , indices=self.dataset._indices , )
snake_case__ : int = batch.to_pandas()
snake_case__ : Optional[Any] = df.to_sql(self.name , self.con , index=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return num_rows or len(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
snake_case__ : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
snake_case__ , snake_case__ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
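# Prompt helper: keep asking until the input converts successfully, returning
# `default` on empty input.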
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
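# Fetch the job list of a GitHub Actions workflow run, following pagination
# (100 jobs per page), and map each job name to its html_url.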
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
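# Download one artifact: the API replies with a redirect whose Location header
# is the real download URL; the zip is saved under the given output directory.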
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
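# Extract (error line, error, failed test, job link) records from the report
# files inside one artifact zip (failures_line.txt, summary_short.txt, job_name.txt).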
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
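# Aggregate across all logs: count occurrences per error message and keep the
# (job link, error line) pairs of the tests that failed with it.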
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Union[str, Any] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
A_ : List[Any] = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
A_ : List[str] = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
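# Return the set of adjacent symbol pairs in a word; used to pick the next
# highest-ranked merge in the BPE loop below.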
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
snake_case__ : int = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : Dict = char
snake_case__ : int = set(__magic_name__ )
return pairs
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = vocab_file
snake_case__ : Optional[Any] = merges_file
snake_case__ : Dict = {}
snake_case__ : Any = 0
snake_case__ : int = 1
snake_case__ : int = 2
snake_case__ : List[Any] = 3
self.add_from_file(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as merges_handle:
snake_case__ : Any = merges_handle.read().split("""\n""" )[:-1]
snake_case__ : int = [tuple(merge.split()[:-1] ) for merge in merges]
snake_case__ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : List[str] = {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : int = [self.cls_token_id]
snake_case__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase ( self ):
return len(self.encoder )
def __UpperCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
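# Standard BPE merge loop: repeatedly merge the highest-ranked adjacent pair
# until no known merge applies, then cache the result per token.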
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if token in self.cache:
return self.cache[token]
snake_case__ : List[Any] = tuple(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
snake_case__ : Any = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
snake_case__ : Optional[Any] = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : Tuple = bigram
snake_case__ : Dict = []
snake_case__ : str = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
snake_case__ : Tuple = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : List[str] = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : Dict = tuple(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
snake_case__ : Union[str, Any] = get_pairs(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = """@@ """.join(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = word[:-4]
snake_case__ : Dict = word
return word
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = []
snake_case__ : Any = re.findall(R"""\S+\n?""" , __SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) )
return split_tokens
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
return self.decoder.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = """ """.join(__SCREAMING_SNAKE_CASE ).replace("""@@ """ , """""" ).strip()
return out_string
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Optional[int] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
if os.path.abspath(self.merges_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.merges_file , __SCREAMING_SNAKE_CASE )
return out_vocab_file, out_merge_file
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
try:
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
snake_case__ : Tuple = f.readlines()
for lineTmp in lines:
snake_case__ : Any = lineTmp.strip()
snake_case__ : Optional[Any] = line.rfind(""" """ )
if idx == -1:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
snake_case__ : Optional[int] = line[:idx]
snake_case__ : Union[str, Any] = len(self.encoder )
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
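# Crawl the page at the given URL, follow every anchor found on it, and collect
# addresses matching <alphanumeric>@<domain> from each linked page.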
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A_ : List[str] = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = ['''input_values''', '''padding_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = 2_4_0_0_0 , __SCREAMING_SNAKE_CASE = 0.0 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = chunk_length_s
snake_case__ : Union[str, Any] = overlap
@property
def __UpperCamelCase ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
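# Pads/truncates raw audio to whole chunk boundaries (derived from chunk_length
# and chunk_stride) before the usual batch padding, returning a BatchFeature.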
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
snake_case__ : Tuple = True
snake_case__ : Dict = bool(
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
snake_case__ : Optional[Any] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 ).T for audio in raw_audio]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
snake_case__ : int = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
snake_case__ : Dict = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
snake_case__ : int = [np.asarray(__SCREAMING_SNAKE_CASE ).T]
# verify inputs are valid
for idx, example in enumerate(__SCREAMING_SNAKE_CASE ):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels" )
snake_case__ : Union[str, Any] = None
snake_case__ : Union[str, Any] = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
snake_case__ : str = min(array.shape[0] for array in raw_audio )
snake_case__ : Optional[int] = int(np.floor(max_length / self.chunk_stride ) )
snake_case__ : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
snake_case__ : Tuple = max(array.shape[0] for array in raw_audio )
snake_case__ : Any = int(np.ceil(max_length / self.chunk_stride ) )
snake_case__ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
snake_case__ : int = """max_length"""
else:
snake_case__ : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
snake_case__ : Dict = self.pad(
__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
if padding:
snake_case__ : Dict = padded_inputs.pop("""attention_mask""" )
snake_case__ : Optional[int] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
snake_case__ : Any = example[..., None]
input_values.append(example.T )
snake_case__ : Any = input_values
if return_tensors is not None:
snake_case__ : List[str] = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
'''simple docstring'''
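# Three ways to check whether a singly linked list is a palindrome: an O(1)-space
# reverse-second-half approach, a stack-based variant, and a value/position dict.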
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
snake_case__ : List[str] = None # Don't forget here! But forget still works!
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ : List[Any] = head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
'''simple docstring'''
import os
import sys
A_ : Dict = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
A_ : Optional[Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
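# hubconf-style entry points: thin wrappers around the Auto* from_pretrained
# methods, each reusing the corresponding class docstring via add_start_docstrings.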
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : str , **__magic_name__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return AutoConfig.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : int , **__magic_name__ : int ) -> str:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : List[str] , **__magic_name__ : int ) -> Optional[Any]:
'''simple docstring'''
return AutoModel.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : List[str] , **__magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : Dict , **__magic_name__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*__magic_name__ , **__magic_name__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase__ ( *__magic_name__ : Dict , **__magic_name__ : Tuple ) -> int:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*__magic_name__ , **__magic_name__ )
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
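# Thin TPU launcher around torch_xla's xmp.spawn: parse --num_cores plus a
# training script, import the script as a module, patch sys.argv, and spawn.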
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__magic_name__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__magic_name__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__magic_name__ )
return parser.parse_args()
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = parse_args()
# Import training_script as a module.
snake_case__ : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case__ : List[Any] = script_fpath.stem
snake_case__ : List[str] = importlib.import_module(__magic_name__ )
# Patch sys.argv
snake_case__ : Optional[int] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : int = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''mra'''
def __init__( self , __SCREAMING_SNAKE_CASE=5_0_2_6_5 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=3_0_7_2 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE="full" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : List[Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : List[str] = type_vocab_size
snake_case__ : str = layer_norm_eps
snake_case__ : Dict = position_embedding_type
snake_case__ : Union[str, Any] = block_per_row
snake_case__ : str = approx_mode
snake_case__ : int = initial_prior_first_n_blocks
snake_case__ : str = initial_prior_diagonal_n_blocks
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
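# Build the flat list of (timm key, HF key) renames for converting a ViT-hybrid
# checkpoint: stem, BiT backbone stages, then the transformer encoder blocks.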
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
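

# Note (added): each pair produced above is applied by a simple pop-and-reinsert
# on the checkpoint dict, e.g.
#   state_dict["vit.embeddings.cls_token"] = state_dict.pop("cls_token")
# which is exactly what `rename_key` further below does for every (src, dest) pair.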
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
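

# Note (added): timm stores the attention projections as one fused qkv matrix of
# shape (3 * hidden_size, hidden_size); rows [0:h] hold the query, [h:2h] the key,
# and [2h:3h] (equivalently [-h:]) the value, which is what the slicing above relies on.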
def remove_classification_head_(state_dict):
    """Drop the timm classification head in-place when only the base model is converted."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download an image of cute cats used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-hybrid model's weights into our ViT structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
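
    # Note (added): the expected list mirrors the Trainer loop structure, epochs
    # wrapping steps, with on_log / on_evaluate / on_save interleaved whenever the
    # configured logging / eval / save intervals divide the current step count.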
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of a decoding step.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    """Convolutional encoder mapping images to (optionally doubled) latent feature maps."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
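

# Note (added): with double_z=True the encoder emits 2 * out_channels feature
# maps, i.e. the concatenated mean and log-variance consumed by the
# DiagonalGaussianDistribution defined at the bottom of this file.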
class Decoder(nn.Module):
    """Convolutional decoder mapping latents back to images; the mirror of `Encoder`."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False
    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps continuous latents to nearest codebook entries."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape
    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)
    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
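

# Illustrative usage (added sketch; sizes are assumptions): quantize a (B, C, H, W)
# latent whose channel count matches vq_embed_dim:
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25)
#   z_q, loss, (_, _, indices) = vq(torch.randn(2, 4, 8, 8))
# The `z + (z_q - z).detach()` line in forward is the straight-through estimator:
# the forward pass uses quantized values while gradients flow to the continuous z.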
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
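

# Note (added): `sample` implements the reparameterization trick
# (x = mean + std * eps with eps ~ N(0, I)), and `kl` is the closed-form KL
# divergence of a diagonal Gaussian, 0.5 * sum(mu^2 + var - 1 - logvar) when
# measured against a standard normal.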
| 38 | 1 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __SCREAMING_SNAKE_CASE , )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , __SCREAMING_SNAKE_CASE , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
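

# Note (added): each "highway exit" pairs an intermediate classifier with an
# entropy estimate of its logits; at inference time, low-entropy (confident)
# examples can leave the network early, which is the core idea of DeeBERT-style
# early exiting that this RoBERTa variant inherits.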
| 38 |
""" Testing suite for the PyTorch DPT model. """
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
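
        # Note (added): comparing a small hard-coded slice at a loose tolerance
        # (atol=1e-4) is the standard integration-test pattern here; exact
        # full-tensor equality would be brittle across hardware and kernels.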
| 38 | 1 |
"""Convert BigScience BLOOM checkpoint."""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names into the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the storage size in bytes of one element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
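

# Worked example (added): torch.float16 -> the regex captures "16" -> 16 // 8 = 2
# bytes per element; torch.bool is special-cased above as 1/8 byte.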
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Convert a Megatron-DeepSpeed BLOOM checkpoint into a HuggingFace PyTorch checkpoint."""
if bloom_config_file == "":
snake_case__ : Tuple = BloomConfig()
else:
snake_case__ : str = BloomConfig.from_json_file(__magic_name__ )
if shard_model:
snake_case__ : str = os.listdir(__magic_name__ )
snake_case__ : Union[str, Any] = sorted(filter(lambda __magic_name__ : s.startswith("""layer""" ) and "model_00" in s , __magic_name__ ) )
snake_case__ : List[Any] = {"""weight_map""": {}, """metadata""": {}}
snake_case__ : Any = 0
snake_case__ : Optional[int] = None
snake_case__ : Tuple = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
else:
snake_case__ : Optional[Any] = BloomModel(__magic_name__ )
snake_case__ : Optional[Any] = os.listdir(__magic_name__ )
        snake_case__ : Union[str, Any] = sorted(filter(lambda __magic_name__ : __magic_name__.startswith("""layer""" ) and "model_00" in __magic_name__ , __magic_name__ ) )
snake_case__ : Tuple = None
for i, file in enumerate(__magic_name__ ):
snake_case__ : Dict = None
for i in range(__magic_name__ ):
# load all TP files
snake_case__ : List[str] = file.replace("""model_00""" , f"model_0{i}" )
snake_case__ : Optional[int] = torch.load(os.path.join(__magic_name__ , __magic_name__ ) , map_location="""cpu""" )
# Rename keys in the transformers names
snake_case__ : Optional[int] = list(temp.keys() )
for key in keys:
snake_case__ : int = temp.pop(__magic_name__ )
if tensors is None:
snake_case__ : str = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
snake_case__ : Tuple = torch.cat([tensors[key], temp[key]] , dim=__magic_name__ )
            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    snake_case__ : str = tensors[key] / pretraining_tp
snake_case__ : Any = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
snake_case__ : Optional[int] = set(other_keys.missing_keys )
else:
snake_case__ : Optional[int] = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case__ : Tuple = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
snake_case__ : List[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
snake_case__ : int = model.to(config.torch_dtype )
torch.save(model.state_dict() , __magic_name__ )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(__magic_name__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
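# A standalone sketch (hypothetical tensors, not the script's real weights) of
# the two merge rules used in both branches above: layer norms and biases are
# averaged over tensor-parallel ranks, while row/column-parallel linear
# weights are concatenated along the sharded dimension.
import torch

def _merge_tp_shards(shards, average, cat_dim=0):
    if average:
        return torch.stack(shards).sum(dim=0) / len(shards)
    return torch.cat(shards, dim=cat_dim)

_shards = [torch.ones(2, 3), torch.ones(2, 3)]
assert _merge_tp_shards(_shards, average=True).shape == (2, 3)   # averaged (e.g. layernorm)
assert _merge_tp_shards(_shards, average=False).shape == (4, 3)  # concatenated (e.g. linear)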
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
        help="Path to the Megatron-LM checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
        help="An optional setting to shard the output model. \nThis enables sharding the converted checkpoint.",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
        help="Pretraining tensor parallelism (TP) degree that was used when training the model in Megatron-LM. \n",
)
A_ : Optional[int] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case__ : int = botoa.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
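# A hedged usage sketch (hypothetical, not executed anywhere): the two defs
# above correspond to accelerate's `_create_iam_role_for_sagemaker` and
# `_get_iam_role_arn`. With real AWS credentials that carry IAM permissions,
# bootstrapping an execution role would look like this, where the role name is
# an assumption:
#
#   role_name = "accelerate_sagemaker_execution_role"
#   _create_iam_role_for_sagemaker(role_name)   # no-op if the role already exists
#   role_arn = _get_iam_role_arn(role_name)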
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with """
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
        print(f"Accelerate will create an IAM role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
    snake_case__ : List[Any] = _ask_field(
        """Do you wish to optimize your script with torch dynamo? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
        snake_case__ : Union[str, Any] = _ask_field(
            """Do you want fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
    snake_case__ : Dict = """Which EC2 instance type do you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        snake_case__ : Optional[Any] = _ask_field(
            """How many machines do you want to use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
| 38 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A_ : int = False
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 1_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__SCREAMING_SNAKE_CASE )
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Any = 1_2
snake_case__ : Any = 1_2
snake_case__ : Any = {
"""attention_bias""": True,
"""cross_attention_dim""": 3_2,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 3_2,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
snake_case__ : int = TransformeraDModel(**__SCREAMING_SNAKE_CASE )
return model
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = """cpu"""
snake_case__ : int = self.dummy_vqvae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : List[Any] = self.dummy_transformer
snake_case__ : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
snake_case__ : Tuple = LearnedClassifierFreeSamplingEmbeddings(learnable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = """teddy bear playing in the pool"""
snake_case__ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Union[str, Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : List[Any] = output.images
snake_case__ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[int] = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case__ : List[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ):
snake_case__ : int = """cpu"""
snake_case__ : Optional[Any] = self.dummy_vqvae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : List[str] = self.dummy_transformer
snake_case__ : Tuple = VQDiffusionScheduler(self.num_embed )
snake_case__ : Dict = LearnedClassifierFreeSamplingEmbeddings(
learnable=__SCREAMING_SNAKE_CASE , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case__ : Union[str, Any] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """teddy bear playing in the pool"""
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : int = output.images
snake_case__ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Any = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
snake_case__ : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case__ : Dict = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case__ : int = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case__ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : List[str] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
snake_case__ : Any = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
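# Aside on the reproducibility pattern the tests above rely on: two
# torch.Generator objects seeded identically produce identical draws, which is
# what makes comparing the dict-style and tuple-style pipeline outputs
# meaningful.
import torch

def _seeded_generators_match(seed=0):
    gen_a = torch.Generator(device="cpu").manual_seed(seed)
    gen_b = torch.Generator(device="cpu").manual_seed(seed)
    return torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))

assert _seeded_generators_match()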
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
snake_case__ : Optional[int] = item.ha.text
snake_case__ : Any = """https://www.amazon.in/""" + item.ha.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
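# A small self-contained sketch (sample strings invented) of the discount
# arithmetic inside the loop above: rupee strings are stripped of the currency
# sign and thousands separators before computing (MRP - price) / MRP * 100.
def _discount_percent(price: str, mrp: str) -> float:
    def to_float(amount: str) -> float:
        return float(amount.strip("₹").replace(",", ""))
    return (to_float(mrp) - to_float(price)) / to_float(mrp) * 1_00

assert _discount_percent("₹7,500", "₹10,000") == 25.0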
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list[int] , __magic_name__ : list[int] , __magic_name__ : int ) -> tuple[float, list[float]]:
'''simple docstring'''
snake_case__ : List[str] = list(range(len(__magic_name__ ) ) )
snake_case__ : int = [v / w for v, w in zip(__magic_name__ , __magic_name__ )]
    index.sort(key=lambda __magic_name__ : ratio[__magic_name__] , reverse=__magic_name__ )
snake_case__ : float = 0
snake_case__ : list[float] = [0] * len(__magic_name__ )
for i in index:
if weight[i] <= capacity:
snake_case__ : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
snake_case__ : Optional[int] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
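# Worked example (items invented) of the greedy rule implemented above: take
# items in decreasing value/weight order and a fraction of the first item that
# no longer fits. An independent mini-implementation used as a cross-check:
def _fractional_knapsack_value(values, weights, capacity):
    order = sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True)
    total = 0.0
    for i in order:
        take = min(weights[i], capacity)
        total += values[i] * take / weights[i]
        capacity -= take
        if capacity == 0:
            break
    return total

# values (60, 100, 120), weights (10, 20, 30), capacity 50 -> 60 + 100 + 120 * 2/3
assert _fractional_knapsack_value([60, 100, 120], [10, 20, 30], 50) == 240.0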
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
        # Test which aims to verify that the offsets are well adapted to the arguments `add_prefix_space` and
        # `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
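# Aside on the vocab built in setUp above: "\u0120" (rendered "Ġ") is the
# byte-level BPE marker for a leading space, which is why tokens such as
# "\u0120low" exist alongside the plain "l o w" merges. A tiny sanity check:
assert "\u0120" == "Ġ"
assert "\u0120low".replace("\u0120", " ") == " low"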
| 38 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Union[str, Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
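# A minimal sketch of the lazy-import idea above (a toy stand-in, not the
# transformers `_LazyModule` implementation): attribute access triggers the
# submodule import on first use, so optional heavy backends load only when
# actually needed.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")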
| 38 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
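# A hedged usage sketch (not part of the original file): the first class above
# corresponds to transformers' ResNetConfig, so against the real library one
# can inspect the stage names derived in __init__. The widths/depths below are
# arbitrary.
def _example_resnet_stage_names():
    from transformers import ResNetConfig

    config = ResNetConfig(depths=[2, 2], hidden_sizes=[6_4, 1_2_8], layer_type="basic")
    return config.stage_names  # ["stem", "stage1", "stage2"]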
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE = 6 ):
snake_case__ : Node | None = None
snake_case__ : Node | None = None
self.create_linked_list(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = Node()
snake_case__ : str = current_node
snake_case__ : List[Any] = current_node
snake_case__ : Union[str, Any] = current_node
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = Node()
snake_case__ : Union[str, Any] = current_node
snake_case__ : Tuple = previous_node
snake_case__ : int = current_node
snake_case__ : int = self.front
snake_case__ : str = previous_node
def __UpperCamelCase ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __UpperCamelCase ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
snake_case__ : List[str] = self.rear.next
if self.rear:
snake_case__ : Tuple = data
def __UpperCamelCase ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
snake_case__ : int = self.front.data
snake_case__ : Tuple = None
return data
snake_case__ : Any = self.front
snake_case__ : Optional[int] = old_front.next
snake_case__ : Union[str, Any] = old_front.data
snake_case__ : Optional[int] = None
return data
def __UpperCamelCase ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def __UpperCamelCase ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __snake_case :
'''simple docstring'''
def __init__( self ):
snake_case__ : Any | None = None
snake_case__ : Node | None = None
snake_case__ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
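# A compact array-backed sketch of the same circular-queue idea as the
# linked-list version above, written so the front/size wrap-around is easy to
# see. Independent of the classes above.
class _RingBuffer:
    def __init__(self, capacity=6):
        self._data = [None] * capacity
        self._front = 0
        self._size = 0

    def enqueue(self, item):
        if self._size == len(self._data):
            raise Exception("Full Queue")
        self._data[(self._front + self._size) % len(self._data)] = item
        self._size += 1

    def dequeue(self):
        if self._size == 0:
            raise Exception("Empty Queue")
        item = self._data[self._front]
        self._front = (self._front + 1) % len(self._data)
        self._size -= 1
        return item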
| 38 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
snake_case__ : List[str] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
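# Cross-check (an aside) of Kernighan's trick used above: n & (n - 1) clears
# the lowest set bit, so the loop runs once per 1-bit rather than once per bit
# position. Verified against Python's own binary representation:
def _popcount(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count

assert all(_popcount(x) == bin(x).count("1") for x in range(2_5_6))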
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( nums : list[int] ) -> int:
    '''simple docstring'''
    # Maximum sum of a subsequence with no two adjacent elements, tracked with two
    # rolling values: the best sum including and the best sum excluding the current item.
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
def __UpperCamelCase ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
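        # a tiny toy vocabulary and merges list, just enough to BPE-tokenize "lower newer"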
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text ) # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def __UpperCamelCase ( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=True ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=True ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_2 = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
        tokenizer = self.get_tokenizer()
        sequence = """Encode this sequence."""
        # the byte-level BPE symbol for a leading space (byte 32 maps to "Ġ")
        space_encoding = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask , lstrip=True , rstrip=False )} ) # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = """Encode <mask> sequence"""
        sequence_nospace = """Encode <mask>sequence"""
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so the average (sum divided by length) should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""trim_offsets"""] , trim_offsets )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 38 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , embed_dim=[1_6, 4_8, 9_6] , num_heads=[1, 3, 6] , depth=[1, 2, 1_0] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-1_2 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def __UpperCamelCase ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of shape (batch_size,) holding the class labels
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def __UpperCamelCase ( self , config , pixel_values , labels ):
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            # standard conv output-size formula: floor((dim + 2 * padding - kernel) / stride) + 1
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def __UpperCamelCase ( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
        policy = tf.keras.mixed_precision.Policy("""mixed_float16""" )
        tf.keras.mixed_precision.set_global_policy(policy )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __UpperCamelCase ( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Image.Image:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 38 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A_ : List[Any] = logging.get_logger(__name__)
A_ : List[str] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
""" padding..""" )
if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def __UpperCamelCase ( self , num_training_steps ):
        if self.optimizer is None:
            # parameters whose names contain one of these substrings are exempt from weight decay
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def __UpperCamelCase ( self , num_training_steps ):
        # look up the schedule factory registered under the --lr_scheduler name in arg_to_scheduler above
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
def __UpperCamelCase ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def __UpperCamelCase ( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute the usual loss via the model, which returns it alongside the logits
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute the label-smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
    def __UpperCamelCase ( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
return loss
    def __UpperCamelCase ( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
    def __UpperCamelCase ( self , tensor , max_length ):
        # If the PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f" padded to `max_length`={max_length}" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        # copy the original values into the left part of the padded tensor; the rest keeps the pad value
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only with integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
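        # e.g. with [[1, 2], [1, 2, 3, 4]], emitting [1, 2] already completes the first branch,
        # so the longer branch [1, 2, 3, 4] could never be pursued; the case below checks exactly this.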
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset ) # fails here
def __UpperCamelCase ( self ):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
        stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 38 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
A_ : Optional[Any] = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''xlm'''
lowerCamelCase__ = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , __SCREAMING_SNAKE_CASE=3_0_1_4_5 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=2_0_4_8**-0.5 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="first" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ):
snake_case__ : Tuple = vocab_size
snake_case__ : Any = emb_dim
snake_case__ : str = n_layers
snake_case__ : Dict = n_heads
snake_case__ : Union[str, Any] = dropout
snake_case__ : Union[str, Any] = attention_dropout
snake_case__ : str = gelu_activation
snake_case__ : List[str] = sinusoidal_embeddings
snake_case__ : Optional[Any] = causal
snake_case__ : int = asm
snake_case__ : List[str] = n_langs
snake_case__ : Any = use_lang_emb
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Optional[Any] = bos_index
snake_case__ : Optional[int] = eos_index
snake_case__ : str = pad_index
snake_case__ : Optional[Any] = unk_index
snake_case__ : Tuple = mask_index
snake_case__ : Tuple = is_encoder
snake_case__ : Any = max_position_embeddings
snake_case__ : Tuple = embed_init_std
snake_case__ : int = init_std
snake_case__ : List[str] = summary_type
snake_case__ : List[str] = summary_use_proj
snake_case__ : int = summary_activation
snake_case__ : Union[str, Any] = summary_proj_to_labels
snake_case__ : Tuple = summary_first_dropout
snake_case__ : List[Any] = start_n_top
snake_case__ : Tuple = end_n_top
snake_case__ : Union[str, Any] = mask_token_id
snake_case__ : Dict = lang_id
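        # legacy alias: older configs may pass `n_words`; the attribute_map above mirrors it onto vocab_size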
if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCamelCase ( self ):
if self.task == "multiple-choice":
snake_case__ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case__ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 38 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
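        # absolute tolerance used when validating the exported ONNX model's outputs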
return 1e-4
@property
def __UpperCamelCase ( self ):
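        # default ONNX opset version to use for the export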
return 1_2
| 38 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
A_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A_ : Tuple = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def UpperCamelCase__ ( h : int , w : int , scale_factor : int = 8 ) -> Tuple[int, int]:
'''simple docstring'''
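    # round each pixel dimension up to a multiple of scale_factor**2, then divide by
    # scale_factor to get the latent grid size (e.g. 768 -> 96 for the default scale_factor=8)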
    new_h = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
    new_w = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        # total spatial downsampling factor of the MoVQ autoencoder (each block after the first halves the resolution)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def __UpperCamelCase ( self , shape , dtype , device , generator , latents , scheduler ):
        # draw fresh noise when no latents are given; otherwise validate the shape and move to the target device
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
return latents
    def __UpperCamelCase ( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , truncation=True , max_length=7_7 , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding="""longest""" , return_tensors="""pt""" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            uncond_tokens : List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}." )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=7_7 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case__ : Optional[int] = torch.device(f"cuda:{gpu_id}" )
snake_case__ : Dict = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case__ : List[str] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case__ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
snake_case__ , snake_case__ : Optional[Any] = cpu_offload_with_hook(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
snake_case__ , snake_case__ : Tuple = cpu_offload_with_hook(self.safety_checker , __SCREAMING_SNAKE_CASE , prev_module_hook=__SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
snake_case__ : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCamelCase ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__SCREAMING_SNAKE_CASE , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = 5_1_2 , __SCREAMING_SNAKE_CASE = 5_1_2 , __SCREAMING_SNAKE_CASE = 1_0_0 , __SCREAMING_SNAKE_CASE = 4.0 , __SCREAMING_SNAKE_CASE = 1 , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "pil" , __SCREAMING_SNAKE_CASE = True , ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = 1
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = len(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__SCREAMING_SNAKE_CASE )}" )
snake_case__ : str = self._execution_device
snake_case__ : List[Any] = batch_size * num_images_per_prompt
snake_case__ : Dict = guidance_scale > 1.0
snake_case__ , snake_case__ , snake_case__ : List[str] = self._encode_prompt(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
snake_case__ : Optional[Any] = image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
snake_case__ : Optional[int] = negative_image_embeds.repeat_interleave(__SCREAMING_SNAKE_CASE , dim=0 )
snake_case__ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=__SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
snake_case__ : int = self.scheduler.timesteps
snake_case__ : int = self.unet.config.in_channels
snake_case__ , snake_case__ : Union[str, Any] = get_new_h_w(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
snake_case__ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
snake_case__ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case__ : Any = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
snake_case__ : Dict = self.unet(
sample=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , added_cond_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
snake_case__ , snake_case__ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
snake_case__ , snake_case__ : Optional[Any] = noise_pred.chunk(2 )
snake_case__ , snake_case__ : Union[str, Any] = variance_pred.chunk(2 )
snake_case__ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case__ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case__ , snake_case__ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case__ : Optional[Any] = self.scheduler.step(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
snake_case__ : Optional[int] = self.movq.decode(__SCREAMING_SNAKE_CASE , force_not_quantize=__SCREAMING_SNAKE_CASE )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case__ : List[Any] = image * 0.5 + 0.5
snake_case__ : List[str] = image.clamp(0 , 1 )
snake_case__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case__ : int = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE )
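# Illustrative sketch (not part of the pipeline above): the height/width helper that
# opens this snippet rounds the requested image size up to a multiple of the MoVQ
# scale factor. `get_new_h_w_sketch` is a hypothetical, deobfuscated rendering of it.
def get_new_h_w_sketch(h: int, w: int, scale_factor: int = 8) -> tuple:
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
assert get_new_h_w_sketch(512, 512) == (64, 64)
assert get_new_h_w_sketch(520, 512) == (72, 64)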
| 38 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
    sa_error = make_github_table(reduced_by_error)
    sa_per_model = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa_error)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa_per_model)
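# Hypothetical mini-run of the Counter-based reduction used above (log entries and
# test paths are invented for illustration): each entry is (error_line, error,
# failed_test), and errors are ranked by how often they occur.
from collections import Counter
logs = [
    ("l1", "ValueError", "tests/models/bert/test_a.py::test_x"),
    ("l2", "ValueError", "tests/models/bert/test_b.py::test_y"),
    ("l3", "KeyError", "tests/models/gpt2/test_c.py::test_z"),
]
counter = Counter(entry[1] for entry in logs)
print(counter.most_common())  # [('ValueError', 2), ('KeyError', 1)]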
| 38 | 1 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A_ : int = threading.Lock()
A_ : Optional[logging.Handler] = None
A_ : str = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ : Dict = logging.WARNING
A_ : Tuple = True
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
snake_case__ : Dict = os.getenv("""TRANSFORMERS_VERBOSITY""" , __magic_name__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
return __name__.split(""".""" )[0]
def UpperCamelCase__ ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
snake_case__ : int = logging.StreamHandler() # Set sys.stderr as stream.
snake_case__ : Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
snake_case__ : Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
snake_case__ : List[str] = False
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
snake_case__ : Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
snake_case__ : str = None
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
return log_levels
def UpperCamelCase__ ( __magic_name__ : Optional[str] = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
snake_case__ : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__magic_name__ )
def UpperCamelCase__ ( ) -> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase__ ( __magic_name__ : int ) -> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(__magic_name__ )
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> List[str]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
return set_verbosity(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCamelCase__ ( __magic_name__ : logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : logging.Handler ) -> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
snake_case__ : List[str] = False
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
_configure_library_root_logger()
snake_case__ : str = True
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
snake_case__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
snake_case__ : Optional[int] = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(__magic_name__ )
def UpperCamelCase__ ( ) -> None:
'''simple docstring'''
snake_case__ : Any = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__magic_name__ )
def UpperCamelCase__ ( self : List[Any] , *__magic_name__ : str , **__magic_name__ : List[str] ) -> Any:
'''simple docstring'''
snake_case__ : Dict = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , __magic_name__ )
if no_advisory_warnings:
return
self.warning(*__magic_name__ , **__magic_name__ )
A_ : List[str] = warning_advice
@functools.lru_cache(__magic_name__ )
def UpperCamelCase__ ( self : Optional[int] , *__magic_name__ : int , **__magic_name__ : List[Any] ) -> Dict:
'''simple docstring'''
self.warning(*__magic_name__ , **__magic_name__ )
A_ : Dict = warning_once
class __snake_case :
'''simple docstring'''
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
snake_case__ : List[str] = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , __SCREAMING_SNAKE_CASE ):
def empty_fn(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return
class __snake_case :
'''simple docstring'''
def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
if _tqdm_active:
return tqdm_lib.tqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
else:
return EmptyTqdm(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
snake_case__ : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A_ : List[str] = _tqdm_cls()
def UpperCamelCase__ ( ) -> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase__ ( ) -> List[Any]:
'''simple docstring'''
global _tqdm_active
snake_case__ : List[str] = True
hf_hub_utils.enable_progress_bars()
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
global _tqdm_active
snake_case__ : List[str] = False
hf_hub_utils.disable_progress_bars()
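# Hedged sketch of the configuration pattern above: one stream handler is attached to
# the library's root logger, and the TRANSFORMERS_VERBOSITY environment variable
# selects the default level. All names below are illustrative, not the obfuscated
# originals.
import logging
import os
import sys
_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
def configure_root_logger(library_name: str) -> logging.Logger:
    root = logging.getLogger(library_name)
    root.addHandler(logging.StreamHandler(sys.stderr))
    root.setLevel(_LEVELS.get(os.getenv("TRANSFORMERS_VERBOSITY", ""), logging.WARNING))
    return root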
| 38 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
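# Worked example (invented URL) of the URL-to-dummy-file mapping used by the
# create_dummy_data_* methods above: the last path component is quoted with
# quote_plus so that query strings survive as part of the local file name.
import os
import urllib.parse
from pathlib import Path
url = "https://example.com/data/train.txt?version=2"
local = os.path.join("dummy_data", urllib.parse.quote_plus(Path(url).name))
print(local)  # dummy_data/train.txt%3Fversion%3D2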
| 38 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(
features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Tuple = Generator(
cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , gen_kwargs=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self ):
# Build iterable dataset
if self.streaming:
snake_case__ : Dict = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
snake_case__ : Optional[Any] = None
snake_case__ : List[Any] = None
snake_case__ : Dict = None
snake_case__ : int = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
snake_case__ : Optional[int] = self.builder.as_dataset(
split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
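# Hedged usage sketch: this reader backs the public `Dataset.from_generator` API in
# `datasets` (available since v2.4), which is the usual way to reach it.
from datasets import Dataset
def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}
ds = Dataset.from_generator(gen)
print(len(ds))  # 3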
| 38 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
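# Minimal sketch of the device-aware seeding used in get_dummy_inputs above: MPS does
# not support device-bound torch.Generator objects, so a global seed is used there
# instead. The helper name is hypothetical.
import torch
def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)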
| 38 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = CustomTokenizer
pass
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
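# Hypothetical demo of the index-to-value conversion pattern above: the bullet menu
# returns a numeric choice as a string, and each converter maps it onto the matching
# option before wrapping it in the corresponding enum.
def convert_precision(value: str) -> str:
    return ["no", "fp16", "bf16", "fp8"][int(value)]
print(convert_precision("1"))  # fp16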
| 38 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
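# Hedged usage sketch: this snippet corresponds to `SegformerConfig` in transformers,
# so the public API can be exercised directly with the parameters defined above.
from transformers import SegformerConfig
config = SegformerConfig(num_encoder_blocks=4, depths=[2, 2, 2, 2])
print(config.num_encoder_blocks)  # 4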
| 38 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
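# Quick check of the averaging helper above (its name is obfuscated to
# UpperCamelCase__): the mean of [1, 2, 3, 4] is sum / length.
assert sum([1, 2, 3, 4]) / len([1, 2, 3, 4]) == 2.5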
| 38 | 1 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __UpperCamelCase ( self ):
snake_case__ : int = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __UpperCamelCase ( self ):
snake_case__ : str = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : List[str] = 3_2
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : int = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : List[str] = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = SkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
}
snake_case__ : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Dict = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCamelCase__ = '''down'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
}
snake_case__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = UNetMidBlockaD # noqa F405
lowerCamelCase__ = '''mid'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = {
"""in_channels""": 3_2,
"""temb_channels""": 1_2_8,
}
snake_case__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : int = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Any = 3_2
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCamelCase__ = '''mid'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Tuple = 3_2
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : str = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = UpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : Dict = 3_2
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Dict = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
snake_case__ : str = 3_2
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def __UpperCamelCase ( self ):
snake_case__ : int = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = SkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = UpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = {"""in_channels""": 3_2, """out_channels""": 3_2}
snake_case__ : Tuple = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__SCREAMING_SNAKE_CASE )
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCamelCase__ = '''up'''
@property
def __UpperCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = {"""in_channels""": 3_2, """out_channels""": 3_2}
snake_case__ : int = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self ):
snake_case__ : int = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__SCREAMING_SNAKE_CASE )
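# The test_output calls above compare the block's output against a hard-coded
# expected slice. A minimal sketch of that check; the exact slicing is an
# assumption based on diffusers' block-test convention of flattening the 3x3
# corner of the last channel:
import torch

def check_output_slice(output, expected_slice, atol=5e-3):
    # take the 3x3 corner of the last channel of the first sample and flatten it
    output_slice = output[0, -1, -3:, -3:].flatten().cpu()
    return torch.allclose(output_slice, torch.tensor(expected_slice), atol=atol)

# e.g. check_output_slice(block_output,
#                         [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568])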
| 38 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined, not empty, and not just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
            # open each discovered link and read its contents
            try:
                snake_case__ : Tuple = requests.get(link )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
                    valid_emails.add(email )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
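# The heart of the scraper above is a single regex applied to each fetched
# page. That step in isolation (the sample HTML below is made up; re.escape
# guards the dot, which the record's pattern leaves unescaped):
import re

html = 'Mail <a href="mailto:alice@github.com">alice@github.com</a> or bob@github.com'
domain = "github.com"
found = set(re.findall(r"[a-zA-Z0-9]+@" + re.escape(domain), html))
print(sorted(found))  # ['alice@github.com', 'bob@github.com']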
| 38 | 1 |
'''simple docstring'''
from math import loga
def UpperCamelCase__ ( __magic_name__ : int ) -> int:
'''simple docstring'''
    if not isinstance(a , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    elif a < 0:
        raise ValueError("""Input value must be a positive integer""" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
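# A readable, runnable rendering of the record above (identifier names are
# assumptions; the dump mangles them). a & -a isolates the lowest set bit, so
# log2 of that power of two is the bit's index.
from math import log2

def index_of_rightmost_set_bit(a: int) -> int:
    if not isinstance(a, int):
        raise TypeError("Input value must be an 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if a == 0 else int(log2(a & -a))

assert index_of_rightmost_set_bit(8) == 3   # 0b1000
assert index_of_rightmost_set_bit(12) == 2  # 0b1100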
| 38 |
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
    snake_case__ : List[str] = None # sever the first half here; the comparison below also works without this cut
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
    # 1. Get the midpoint (slow); slow, fast and cur all start at head
    snake_case__ = snake_case__ = snake_case__ = head  # slow = fast = cur = head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
            d[head.val].append(pos )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
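# The first strategy above (reverse the second half in place), compacted into
# a runnable form with readable names restored (an assumption; the record's
# assignments are mangled):
class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

def is_palindrome(head) -> bool:
    # 1. fast/slow pointers find the middle
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
    # 2. reverse the list from the middle onward
    prev = None
    while slow:
        slow.next, prev, slow = prev, slow, slow.next
    # 3. compare; the reversed half has the same length or one extra node
    while prev and head:
        if prev.val != head.val:
            return False
        prev, head = prev.next, head.next
    return True

nodes = None
for v in [1, 2, 3, 2, 1]:
    nodes = ListNode(v, nodes)
assert is_palindrome(nodes)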
| 38 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger("transformers.models.encodec")
A_ : Dict = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
A_ : Optional[int] = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
A_ : List[str] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
A_ : List[str] = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
A_ : List[Any] = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
A_ : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
A_ : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
A_ : Any = []
A_ : List[str] = []
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> List[str]:
'''simple docstring'''
for attribute in key.split(""".""" ):
snake_case__ : Tuple = getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
snake_case__ : List[Any] = getattr(__magic_name__ , __magic_name__ ).shape
else:
snake_case__ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_g":
snake_case__ : List[str] = value
elif weight_type == "weight_v":
snake_case__ : str = value
elif weight_type == "bias":
snake_case__ : Dict = value
elif weight_type == "running_mean":
snake_case__ : Tuple = value
elif weight_type == "running_var":
snake_case__ : Tuple = value
elif weight_type == "num_batches_tracked":
snake_case__ : str = value
elif weight_type == "weight_ih_l0":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_hh_l0":
snake_case__ : int = value
elif weight_type == "bias_ih_l0":
snake_case__ : List[str] = value
elif weight_type == "bias_hh_l0":
snake_case__ : Optional[Any] = value
elif weight_type == "weight_ih_l1":
snake_case__ : Any = value
elif weight_type == "weight_hh_l1":
snake_case__ : Any = value
elif weight_type == "bias_ih_l1":
snake_case__ : List[str] = value
elif weight_type == "bias_hh_l1":
snake_case__ : Tuple = value
else:
snake_case__ : List[str] = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str ) -> Optional[int]:
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case__ , snake_case__ : Dict = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
snake_case__ : Optional[int] = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case__ : Union[str, Any] = MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(__magic_name__ , __magic_name__ ):
logger.info(f"{name} was ignored" )
continue
snake_case__ : Dict = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case__ , snake_case__ : Union[str, Any] = key.split(""".*.""" )
if prefix in name and suffix in name:
snake_case__ : Union[str, Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
snake_case__ : List[str] = True
if "*" in mapped_key:
                snake_case__ : Tuple = name.split(key )[0].split(""".""" )[-2]
snake_case__ : str = mapped_key.replace("""*""" , __magic_name__ )
if "weight_g" in name:
snake_case__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
snake_case__ : List[Any] = """weight_v"""
elif "weight_ih_l0" in name:
snake_case__ : Tuple = """weight_ih_l0"""
elif "weight_hh_l0" in name:
snake_case__ : Any = """weight_hh_l0"""
elif "bias_ih_l0" in name:
snake_case__ : List[str] = """bias_ih_l0"""
elif "bias_hh_l0" in name:
snake_case__ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
snake_case__ : Optional[int] = """weight_ih_l1"""
elif "weight_hh_l1" in name:
snake_case__ : Optional[Any] = """weight_hh_l1"""
elif "bias_ih_l1" in name:
snake_case__ : Any = """bias_ih_l1"""
elif "bias_hh_l1" in name:
snake_case__ : Optional[int] = """bias_hh_l1"""
elif "bias" in name:
snake_case__ : Union[str, Any] = """bias"""
elif "weight" in name:
snake_case__ : Union[str, Any] = """weight"""
elif "running_mean" in name:
snake_case__ : int = """running_mean"""
elif "running_var" in name:
snake_case__ : int = """running_var"""
elif "num_batches_tracked" in name:
snake_case__ : List[Any] = """num_batches_tracked"""
else:
snake_case__ : Optional[Any] = None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : Any , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int=None , __magic_name__ : Any=None , ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
snake_case__ : List[str] = EncodecConfig.from_pretrained(__magic_name__ )
else:
snake_case__ : List[Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case__ : Union[str, Any] = [8, 5, 4, 4]
snake_case__ : Union[str, Any] = [2.2]
snake_case__ : Any = 64
snake_case__ : List[str] = 3_20_00
snake_case__ : List[Any] = 20_48
snake_case__ : str = False
snake_case__ : List[str] = False
snake_case__ : List[str] = False
elif model_name == "encodec_48khz":
snake_case__ : str = [8, 5, 4, 2]
snake_case__ : int = [3.0, 6.0, 12.0, 24.0]
snake_case__ : int = 4_80_00
snake_case__ : Optional[int] = 2
snake_case__ : int = False
snake_case__ : Dict = """time_group_norm"""
snake_case__ : Tuple = True
snake_case__ : List[Any] = 1.0
snake_case__ : str = 0.01
else:
raise ValueError(f"Unknown model name: {model_name}" )
snake_case__ : List[Any] = EncodecModel(__magic_name__ )
snake_case__ : str = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__magic_name__ )
snake_case__ : Dict = torch.load(__magic_name__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case__ : Optional[Any] = original_checkpoint["""best_state"""]
recursively_load_weights(__magic_name__ , __magic_name__ , __magic_name__ )
model.save_pretrained(__magic_name__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(__magic_name__ )
model.push_to_hub(__magic_name__ )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
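# The MAPPING lookups above rely on ".*." wildcards so a single entry covers
# every layer index. That prefix/suffix test, isolated (the key names are
# taken from the tables above):
def wildcard_match(key: str, name: str) -> bool:
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert wildcard_match("quantizer.vq.layers.*._codebook.embed",
                      "quantizer.vq.layers.3._codebook.embed")
assert not wildcard_match("encoder.model.0.conv.conv", "decoder.layers.0.conv")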
| 38 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_XX"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
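# What the shift_tokens_right calls above assert: MBart labels end with
# "... eos lang_code", and decoder inputs rotate the language code to the
# front. A toy sketch (hypothetical helper name; the real function also
# handles padding, which is omitted here):
RO_CODE = 250020
EOS = 2

def shift_tokens_right_toy(labels):
    return [labels[-1]] + labels[:-1]

labels = [8474, 127873, EOS, RO_CODE]
decoder_input_ids = shift_tokens_right_toy(labels)
assert decoder_input_ids[0] == RO_CODE  # starts with the language code
assert decoder_input_ids[-1] == EOS     # ends with eos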
| 38 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = StableUnCLIPPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[str] = 3_2
snake_case__ : List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
snake_case__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
snake_case__ : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
snake_case__ : int = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=__SCREAMING_SNAKE_CASE , num_layers=1 , )
torch.manual_seed(0 )
snake_case__ : Tuple = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1_0_0_0 , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
snake_case__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE )
snake_case__ : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
snake_case__ : Dict = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
snake_case__ : List[Any] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL()
snake_case__ : Optional[int] = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[str] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : List[str] = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Any = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
snake_case__ : Optional[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        snake_case__ : int = pipe("""anime turtle""" , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" )
snake_case__ : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
snake_case__ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ : int = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
snake_case__ : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 38 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
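# The validation performed in __init__ above, as a standalone sketch (the
# constants are copied from the class attributes; the function name is
# hypothetical):
from typing import Optional

LAYER_TYPES = ["preactivation", "bottleneck"]
SUPPORTED_PADDING = ["SAME", "VALID"]

def normalize_config(layer_type: str, global_padding: Optional[str]) -> Optional[str]:
    if layer_type not in LAYER_TYPES:
        raise ValueError(f"layer_type={layer_type} is not one of {','.join(LAYER_TYPES)}")
    if global_padding is not None:
        if global_padding.upper() not in SUPPORTED_PADDING:
            raise ValueError(f"Padding strategy {global_padding} not supported")
        return global_padding.upper()
    return None

assert normalize_config("preactivation", "same") == "SAME"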
| 38 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any]=10_00 ) -> List[str]:
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
snake_case__ : int = n - 1
snake_case__ : Union[str, Any] = 0
while d % 2 == 0:
d /= 2
exp += 1
    # n - 1 = d * (2**exp), with d odd
snake_case__ : str = 0
while count < prec:
snake_case__ : Any = random.randint(2 , n - 1 )
snake_case__ : Optional[Any] = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
snake_case__ : Optional[int] = True
for _ in range(__magic_name__ ):
if b == n - 1:
snake_case__ : Any = False
break
snake_case__ : List[str] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
A_ : Optional[Any] = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
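# The record above, rewritten runnably with standard names (an assumption;
# the dump mangles assignments). Python's built-in pow(base, d, n) stands in
# for the bin_exp_mod helper.
import random

def is_prime_big(n: int, prec: int = 1000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1  # n - 1 == d * 2**exp with d odd
    for _ in range(prec):
        b = pow(random.randint(2, n - 1), d, n)
        if b == 1:
            continue
        for _ in range(exp):
            if b == n - 1:
                break
            b = b * b % n
        else:
            return False  # no square ever reached n - 1: composite witness
    return True

assert is_prime_big(97) and not is_prime_big(91)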
| 38 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
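# The read_in_q_k_v step above slices timm's fused qkv projection into the
# separate query/key/value tensors the HF model expects. The slicing in
# isolation (dimensions are illustrative):
import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # rows: [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]

# the three slices reassemble to the fused matrix
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)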
| 38 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = AlbertConfig.from_json_file(__magic_name__ )
print(f"Building PyTorch model from configuration: {config}" )
snake_case__ : Optional[int] = AlbertForPreTraining(__magic_name__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__magic_name__ , __magic_name__ , __magic_name__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , __magic_name__ )
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Conv2d(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
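# Usage sketch (a minimal illustration, assuming this class corresponds to
# diffusers' Encoder; the names and shapes below are assumptions, not code
# from this file):
#
#   enc = Encoder(in_channels=3, out_channels=4, block_out_channels=(64,), double_z=True)
#   moments = enc(torch.randn(1, 3, 64, 64))
#   # double_z=True doubles the output channels to 2 * out_channels = 8,
#   # packing the mean and log-variance of a diagonal Gaussian posterior.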
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Conv2d(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
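# torch.cdist gives the pairwise Euclidean distances between every flattened
# latent vector and every codebook vector; argmin then picks the nearest code.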
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
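# Usage sketch (a minimal illustration, assuming this class corresponds to
# diffusers' VectorQuantizer; the names and shapes are assumptions):
#
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25, remap=None, sane_index_shape=False)
#   z = torch.randn(2, 4, 8, 8)              # (batch, channel, height, width)
#   z_q, loss, (_, _, indices) = vq(z)       # z_q keeps the input shape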
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , parameters , deterministic=False ):
self.parameters = parameters
self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar )
self.var = torch.exp(self.logvar )
if self.deterministic:
self.var = self.std = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def sample( self , generator = None ):
# make sure sample is on the same device as the parameters and has same dtype
sample = randn_tensor(
self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
x = self.mean + self.std * sample
return x
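# This is the reparameterization trick: the draw is mean + std * eps with
# eps ~ N(0, I), so gradients can flow through mean and std.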
def kl( self , other=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
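# Closed-form KL for diagonal Gaussians: with other=None this reduces to
# KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2).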
def nll( self , sample , dims=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
logtwopi = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
def mode( self ):
return self.mean
| 38 | 1 |
'''Malus's law for the intensity of light transmitted through a polarizer.'''
import math
def malus_law(initial_intensity: float , angle: float ) -> float:
'''Return the intensity transmitted through an ideal polarizer per Malus's law.'''
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
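# Quick sanity check (values are illustrative): an analyzer rotated 60 degrees
# from the polarizer transmits cos^2(60 deg) = 0.25 of the light, so
# malus_law(100.0, 60.0) returns approximately 25.0.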
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
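# e.g. with this tester's typical defaults (image_size=32, patch_size=16):
# num_patches = (32 // 16) ** 2 = 4, so seq_length = 4 + 1 = 5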
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 1 |
'''Haversine distance between two points on the Earth's surface.'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0  # WGS-84 semi-major axis (meters)
AXIS_B = 6356752.314245  # WGS-84 semi-minor axis (meters)
RADIUS = 6378137  # Earth radius used by the formula (meters)
def haversine_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
'''Return the great-circle distance in meters between two (lat, lon) points.'''
flattening = (AXIS_A - AXIS_B) / AXIS_A
phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
lambda_1 = radians(lon1 )
lambda_2 = radians(lon2 )
# Equation
sin_sq_phi = sin((phi_2 - phi_1) / 2 )
sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
return 2 * RADIUS * asin(h_value )
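# Rough sanity check (coordinates are illustrative): San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) should come
# out near 254 km, i.e. roughly 254,352 meters.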
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str) -> None:
'''Create an IAM execution role for SageMaker training jobs with the required permissions.'''
iam_client = boto3.client("iam" )
sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=role_name , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn(role_name: str) -> str:
'''Return the ARN of an existing IAM role.'''
iam_client = boto3.client("iam" )
return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
'''Walk the user through the interactive SageMaker configuration questions.'''
credentials_configuration = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , int , )
aws_profile = None
if credentials_configuration == 0:
aws_profile = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
os.environ["AWS_PROFILE"] = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
aws_access_key_id = _ask_field("AWS Access Key ID: " )
os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
aws_secret_access_key = _ask_field("AWS Secret Access Key: " )
os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
aws_region = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
os.environ["AWS_DEFAULT_REGION"] = aws_region
role_management = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , int , )
if role_management == 0:
iam_role_name = _ask_field("Enter your IAM role name: " )
else:
iam_role_name = "accelerate_sagemaker_execution_role"
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(iam_role_name )
is_custom_docker_image = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
docker_image = None
if is_custom_docker_image:
docker_image = _ask_field("Enter your Docker image: " , lambda x: str(x ).lower() )
is_sagemaker_inputs_enabled = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
sagemaker_inputs_file = None
if is_sagemaker_inputs_enabled:
sagemaker_inputs_file = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda x: str(x ).lower() , )
is_sagemaker_metrics_enabled = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
sagemaker_metrics_file = None
if is_sagemaker_metrics_enabled:
sagemaker_metrics_file = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda x: str(x ).lower() , )
distributed_type = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
dynamo_config = {}
use_dynamo = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
if use_dynamo:
prefix = "dynamo_"
dynamo_config[prefix + "backend"] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
use_custom_options = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
if use_custom_options:
dynamo_config[prefix + "mode"] = _ask_options(
"Which mode do you want to use?" , TORCH_DYNAMO_MODES , lambda x: TORCH_DYNAMO_MODES[int(x )] , default="default" , )
dynamo_config[prefix + "use_fullgraph"] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
dynamo_config[prefix + "use_dynamic"] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=False , error_message="Please enter yes or no." , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
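# Context note (an assumption based on the imports above): this questionnaire
# is the interactive flow behind `accelerate config` when Amazon SageMaker is
# chosen as the compute environment; the resulting SageMakerConfig is what
# `accelerate launch` later consumes.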
| 38 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''swin'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=9_6 , __SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , __SCREAMING_SNAKE_CASE=[3, 6, 1_2, 2_4] , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=4.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-5 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = image_size
snake_case__ : Dict = patch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : List[str] = embed_dim
snake_case__ : Optional[Any] = depths
snake_case__ : Dict = len(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = num_heads
snake_case__ : str = window_size
snake_case__ : Union[str, Any] = mlp_ratio
snake_case__ : List[str] = qkv_bias
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : Tuple = drop_path_rate
snake_case__ : Tuple = hidden_act
snake_case__ : Union[str, Any] = use_absolute_embeddings
snake_case__ : int = layer_norm_eps
snake_case__ : List[str] = initializer_range
snake_case__ : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case__ : Optional[Any] = int(embed_dim * 2 ** (len(__SCREAMING_SNAKE_CASE ) - 1) )
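# e.g. with the defaults embed_dim=96 and depths=[2, 2, 6, 2] (4 stages):
# hidden_size = 96 * 2 ** 3 = 768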
snake_case__ : Tuple = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : List[Any] = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
| 38 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop" ) -> DataFrame:
'''Return title, link, price, rating, MRP, and discount for Amazon.in search results.'''
url = f"https://www.amazon.in/laptop/s?k={product}"
header = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
soup = BeautifulSoup(requests.get(url , headers=header ).text , "html.parser" )
# Initialize a Pandas dataframe with the column titles
data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
product_title = item.h2.text
product_link = "https://www.amazon.in/" + item.h2.a["href"]
product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
product_rating = "Not available"
try:
product_mrp = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
product_mrp = ""
try:
discount = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
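# e.g. an MRP of ₹1,000 and a current price of ₹800 give
# ((1000 - 800) / 1000) * 100 = 20.0 percent discount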
except ValueError:
discount = float("nan" )
except AttributeError:
pass
data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
# Render missing price fields as a blank cell instead of an empty string.
data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
product = "headphones"
get_amazon_product_data(product ).to_csv(f"Amazon Product Data for {product}.csv" )
| 38 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyV22ControlnetPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyV22ControlnetPipeline
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
lowerCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ : int = UNet2DConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
snake_case__ : str = self.dummy_unet
snake_case__ : Optional[int] = self.dummy_movq
snake_case__ : Dict = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create hint
snake_case__ : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = """cpu"""
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = output.images
snake_case__ : int = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Tuple = np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
snake_case__ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case__ : List[Any] = torch.from_numpy(np.array(__SCREAMING_SNAKE_CASE ) ).float() / 255.0
snake_case__ : Dict = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case__ : Optional[Any] = KandinskyV22PriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = KandinskyV22ControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.float16 )
snake_case__ : List[Any] = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = """A robot, 4k photo"""
snake_case__ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ , snake_case__ : str = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : List[Any] = pipeline(
image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , hint=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="""np""" , )
snake_case__ : List[str] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
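# "\u0120" is the byte-level BPE marker for a leading space (rendered as "Ġ"),
# so "\u0120low" is the token for " low" at the start of a word.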
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 1 |
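A note on the offset assertions in the row above: every expected `offset_mapping` value follows from two booleans only, whether the input text carries a leading space and whether `trim_offsets` strips the space that BPE merged into each Ġ-prefixed token. Below is a minimal pure-Python sketch of that arithmetic; it models the behaviour, it is not the tokenizers library.

# Sketch of the offset arithmetic asserted above. `leading_space` mirrors
# whether the input text starts with " "; `trim_offsets` decides whether a
# token's offsets include the space merged into it.
def expected_offsets(word: str, leading_space: bool, trim_offsets: bool):
    n = len(word)
    s = 1 if leading_space else 0
    if trim_offsets:
        return (s, s + n), (s + n + 1, s + n + 1 + n)
    # untrimmed offsets absorb the space preceding each token
    return (0, s + n), (s + n, s + n + 1 + n)

assert expected_offsets("hello", False, True) == ((0, 5), (6, 11))
assert expected_offsets("hello", False, False) == ((0, 5), (5, 11))
assert expected_offsets("hello", True, True) == ((1, 6), (7, 12))
assert expected_offsets("hello", True, False) == ((0, 6), (6, 12))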
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''bit'''
lowerCamelCase__ = ['''preactivation''', '''bottleneck''']
lowerCamelCase__ = ['''SAME''', '''VALID''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="preactivation" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case__ : Tuple = global_padding.upper()
else:
raise ValueError(f"Padding strategy {global_padding} not supported" )
snake_case__ : List[str] = num_channels
snake_case__ : Tuple = embedding_size
snake_case__ : str = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : List[Any] = layer_type
snake_case__ : Dict = hidden_act
snake_case__ : Union[str, Any] = global_padding
snake_case__ : List[str] = num_groups
snake_case__ : str = drop_path_rate
snake_case__ : List[Any] = embedding_dynamic_padding
snake_case__ : List[str] = output_stride
snake_case__ : Dict = width_factor
snake_case__ : List[str] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Dict = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 38 |
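The config above delegates the reconciliation of `out_features` and `out_indices` to `get_aligned_output_features_output_indices`. A simplified re-implementation of what that alignment amounts to is sketched below; this is an assumption-laden stand-in (it skips negative-index handling and validation), not the transformers helper itself.

# Hedged sketch: align feature names and indices against stage_names,
# defaulting to the deepest stage when neither is provided.
def align_outputs(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: last stage only
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

stages = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_outputs(None, None, stages))        # (['stage4'], [4])
print(align_outputs(["stage2"], None, stages))  # (['stage2'], [2])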
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
| 38 | 1 |
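The ONNX config in the row above only declares the input axis names and a validation tolerance; downstream export code turns that mapping into dynamic axes. The snippet below illustrates the translation under stated assumptions; the names are illustrative and this is not the actual transformers.onnx export internals.

from collections import OrderedDict

# The `inputs` property above yields an OrderedDict of axis-name mappings.
onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
export_kwargs = {
    "input_names": list(onnx_inputs),   # ["pixel_values"]
    "dynamic_axes": dict(onnx_inputs),  # symbolic axis names per input
}
print(export_kwargs)
# The 1e-3 returned above would then bound the PyTorch-vs-ONNX output
# difference during post-export validation.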
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
A_ : List[str] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def UpperCamelCase__ ( __magic_name__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[str] = {}
state_dict.pop("""pixel_mean""" , __magic_name__ )
state_dict.pop("""pixel_std""" , __magic_name__ )
snake_case__ : Optional[Any] = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ : Tuple = key.replace(__magic_name__ , __magic_name__ )
if re.match(__magic_name__ , __magic_name__ ):
snake_case__ : List[Any] = int(re.match(__magic_name__ , __magic_name__ ).group(2 ) )
if layer_nb == 0:
snake_case__ : str = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
snake_case__ : Union[str, Any] = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
snake_case__ : List[Any] = key.replace("""layers.2""" , """proj_out""" )
snake_case__ : Any = value
snake_case__ : Any = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Optional[int]="ybelkada/segment-anything" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = hf_hub_download(__magic_name__ , f"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
snake_case__ : Union[str, Any] = SamConfig()
elif "sam_vit_l" in model_name:
snake_case__ : Optional[int] = SamVisionConfig(
hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case__ : Union[str, Any] = SamConfig(
vision_config=__magic_name__ , )
elif "sam_vit_h" in model_name:
snake_case__ : Union[str, Any] = SamVisionConfig(
hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case__ : int = SamConfig(
vision_config=__magic_name__ , )
snake_case__ : List[Any] = torch.load(__magic_name__ , map_location="""cpu""" )
snake_case__ : List[str] = replace_keys(__magic_name__ )
snake_case__ : Union[str, Any] = SamImageProcessor()
snake_case__ : str = SamProcessor(image_processor=__magic_name__ )
snake_case__ : int = SamModel(__magic_name__ )
hf_model.load_state_dict(__magic_name__ )
snake_case__ : List[str] = hf_model.to("""cuda""" )
snake_case__ : Tuple = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert("""RGB""" )
snake_case__ : List[str] = [[[4_00, 6_50]]]
snake_case__ : int = [[1]]
snake_case__ : Optional[Any] = processor(images=np.array(__magic_name__ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case__ : Tuple = hf_model(**__magic_name__ )
snake_case__ : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
snake_case__ : Tuple = processor(
images=np.array(__magic_name__ ) , input_points=__magic_name__ , input_labels=__magic_name__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case__ : List[Any] = hf_model(**__magic_name__ )
snake_case__ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
snake_case__ : Any = ((75, 2_75, 17_25, 8_50),)
snake_case__ : List[Any] = processor(images=np.array(__magic_name__ ) , input_boxes=__magic_name__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case__ : List[Any] = hf_model(**__magic_name__ )
snake_case__ : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
snake_case__ : List[Any] = [[[4_00, 6_50], [8_00, 6_50]]]
snake_case__ : Dict = [[1, 1]]
snake_case__ : int = processor(
images=np.array(__magic_name__ ) , input_points=__magic_name__ , input_labels=__magic_name__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
snake_case__ : Optional[Any] = hf_model(**__magic_name__ )
snake_case__ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
A_ : List[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
A_ : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 38 |
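The checkpoint conversion above relies on two renaming mechanisms: plain substring remapping via KEYS_TO_MODIFY_MAPPING, and a regex special case that re-numbers the hypernetwork MLP layers. A self-contained sketch of that pattern (not the conversion script itself; the lookup tables here are trimmed to two entries):

import re

KEYS = {"image_encoder": "vision_encoder", "blocks": "layers"}
HYPERNET = re.compile(r".*\.output_hypernetworks_mlps\.(\d+)\.layers\.(\d+).*")
LAYER_RENAME = {0: ("layers.0", "proj_in"), 1: ("layers.1", "layers.0"), 2: ("layers.2", "proj_out")}

def rename(key: str) -> str:
    # substring remapping first, then the layer-number special case
    for old, new in KEYS.items():
        if old in key:
            key = key.replace(old, new)
    m = HYPERNET.match(key)
    if m:
        old, new = LAYER_RENAME[int(m.group(2))]
        key = key.replace(old, new)
    return key

print(rename("image_encoder.blocks.0.attn"))
# vision_encoder.layers.0.attn
print(rename("mask_decoder.output_hypernetworks_mlps.3.layers.2.weight"))
# mask_decoder.output_hypernetworksks_mlps-style keys get layers.2 -> proj_out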
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 1 |
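For reference, the shape of a deprecate() helper like the one invoked above can be sketched with the standard warnings module. This is illustrative only; the real diffusers helper also handles version gating and attribute returns.

import warnings

def deprecate(name: str, removed_in: str, message: str, stacklevel: int = 2) -> None:
    # Emit a FutureWarning pointing callers at the replacement import path.
    warnings.warn(
        f"{name} is deprecated and will be removed in version {removed_in}. {message}",
        FutureWarning,
        stacklevel=stacklevel,
    )

deprecate("pipelines_utils", "0.22.0", "Import from diffusers.pipelines.pipeline_utils instead.")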
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = -1
snake_case__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case__ : List[str] = TextStreamer(__SCREAMING_SNAKE_CASE )
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case__ : List[str] = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = -1
snake_case__ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer.decode(greedy_ids[0] )
snake_case__ : Tuple = TextIteratorStreamer(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
snake_case__ : Tuple = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE )
thread.start()
snake_case__ : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = -1
snake_case__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=__SCREAMING_SNAKE_CASE )
snake_case__ : str = greedy_ids[:, input_ids.shape[1] :]
snake_case__ : Tuple = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case__ : Any = TextStreamer(__SCREAMING_SNAKE_CASE , skip_prompt=__SCREAMING_SNAKE_CASE )
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1_0 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case__ : Any = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case__ : str = AutoTokenizer.from_pretrained("""distilgpt2""" )
snake_case__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = -1
snake_case__ : Union[str, Any] = torch.ones((1, 5) , device=__SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case__ : Optional[int] = TextStreamer(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case__ : Tuple = cs.out[:-1] # Remove the final "\n"
snake_case__ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __UpperCamelCase ( self ):
snake_case__ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case__ : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = -1
snake_case__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : str = TextIteratorStreamer(__SCREAMING_SNAKE_CASE , timeout=0.001 )
snake_case__ : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer}
snake_case__ : List[str] = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 38 |
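The iterator-streamer tests above hinge on a producer/consumer handoff: generate() runs on a worker thread and pushes decoded chunks into a queue that the caller iterates over, with an optional per-item timeout. A dependency-free sketch of that mechanism follows; it is a stand-in for the pattern, not TextIteratorStreamer itself.

from queue import Queue
from threading import Thread
from typing import Optional

class ToyIteratorStreamer:
    _DONE = object()  # sentinel marking end of generation

    def __init__(self, timeout: Optional[float] = None):
        self.queue: Queue = Queue()
        self.timeout = timeout

    def put(self, text: str) -> None:  # producer side
        self.queue.put(text)

    def end(self) -> None:
        self.queue.put(self._DONE)

    def __iter__(self):
        return self

    def __next__(self) -> str:
        item = self.queue.get(timeout=self.timeout)  # raises queue.Empty on timeout
        if item is self._DONE:
            raise StopIteration
        return item

def fake_generate(streamer: ToyIteratorStreamer) -> None:
    for chunk in ("Hello", ", ", "world"):
        streamer.put(chunk)
    streamer.end()

streamer = ToyIteratorStreamer(timeout=1.0)
Thread(target=fake_generate, args=(streamer,)).start()
print("".join(streamer))  # -> Hello, world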
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 38 | 1 |
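The callback tests above exercise an event fan-out: a handler holds a list of callbacks and forwards each lifecycle event to every one of them in order. A simplified model of that dispatch mechanism, not the transformers CallbackHandler itself:

class RecordingCallback:
    def __init__(self):
        self.events = []

    def on_train_begin(self):
        self.events.append("on_train_begin")

    def on_step_end(self):
        self.events.append("on_step_end")

    def on_train_end(self):
        self.events.append("on_train_end")

class ToyHandler:
    def __init__(self, callbacks):
        self.callbacks = list(callbacks)

    def fire(self, event: str) -> None:
        # Forward the event to every registered callback, in order.
        for cb in self.callbacks:
            getattr(cb, event)()

recorder = RecordingCallback()
handler = ToyHandler([recorder])
for event in ("on_train_begin", "on_step_end", "on_step_end", "on_train_end"):
    handler.fire(event)
print(recorder.events)
# ['on_train_begin', 'on_step_end', 'on_step_end', 'on_train_end']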
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Dict = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''lilt'''
def __init__( self , __SCREAMING_SNAKE_CASE=3_0_5_2_2 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=3_0_7_2 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=1_0_2_4 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : Dict = num_attention_heads
snake_case__ : List[str] = hidden_act
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = max_position_embeddings
snake_case__ : List[Any] = type_vocab_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
snake_case__ : Optional[int] = position_embedding_type
snake_case__ : List[str] = classifier_dropout
snake_case__ : Tuple = channel_shrink_ratio
snake_case__ : Dict = max_ad_position_embeddings
| 38 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 38 | 1 |
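The image-processor tests above configure per-channel mean/std normalization. The core of that step is sketched below with NumPy on a channels-first array; this is a stand-in for the behaviour, not the DPTImageProcessor implementation.

import numpy as np

def normalize(image: np.ndarray, mean, std) -> np.ndarray:
    # Broadcast per-channel statistics over (num_channels, height, width).
    mean = np.asarray(mean, dtype=image.dtype).reshape(-1, 1, 1)
    std = np.asarray(std, dtype=image.dtype).reshape(-1, 1, 1)
    return (image - mean) / std

img = np.random.rand(3, 18, 18).astype(np.float32)
out = normalize(img, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
print(out.shape, out.min() >= -1.0, out.max() <= 1.0)  # values now in [-1, 1]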
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case__ : str = 0
if start < end:
snake_case__ : str = randint(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = a[end]
snake_case__ : Dict = a[pivot]
snake_case__ : Union[str, Any] = temp
snake_case__ , snake_case__ : List[Any] = _in_place_partition(__magic_name__ , __magic_name__ , __magic_name__ )
count += _in_place_quick_sort(__magic_name__ , __magic_name__ , p - 1 )
count += _in_place_quick_sort(__magic_name__ , p + 1 , __magic_name__ )
return count
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 0
snake_case__ : int = randint(__magic_name__ , __magic_name__ )
snake_case__ : Any = a[end]
snake_case__ : Dict = a[pivot]
snake_case__ : List[Any] = temp
snake_case__ : List[str] = start - 1
for index in range(__magic_name__ , __magic_name__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
snake_case__ : Dict = new_pivot_index + 1
snake_case__ : Union[str, Any] = a[new_pivot_index]
snake_case__ : int = a[index]
snake_case__ : str = temp
snake_case__ : List[Any] = a[new_pivot_index + 1]
snake_case__ : Optional[int] = a[end]
snake_case__ : List[Any] = temp
return new_pivot_index + 1, count
A_ : Dict = TemporaryFile()
A_ : str = 100  # 100 elements are to be sorted
A_ , A_ : Union[str, Any] = 0, 1 # mean and standard deviation
A_ : Optional[Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
A_ : List[str] = np.load(outfile)
A_ : List[Any] = len(M) - 1
A_ : List[Any] = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution is:"
)
print(z)
| 38 |
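A quick sanity check for the quicksort above, using the `_in_place_quick_sort` name that the row's own driver code calls (assumed to be in scope): sorting happens in place, and the return value is the comparison count.

import numpy as np

arr = np.random.normal(0, 1, 50)
reference = sorted(arr.tolist())
count = _in_place_quick_sort(arr, 0, len(arr) - 1)  # defined above
assert np.allclose(arr, reference), "array should now be sorted in place"
print(f"sorted {len(arr)} elements using {count} comparisons")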
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 1 |
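The model tester above derives each stage's feature-map size with the standard convolution output formula, floor((size + 2*padding - kernel) / stride) + 1. A worked check against the tester's defaults (kernel 7/stride 4/padding 2 for stage one, then 3/2/1):

from math import floor

def conv_out(size: int, kernel: int, stride: int, padding: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

print(conv_out(64, kernel=7, stride=4, padding=2))  # 16 (first stage, image_size=64)
print(conv_out(16, kernel=3, stride=2, padding=1))  # 8  (second stage)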
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A_ : Union[str, Any] = None
A_ : Dict = logging.get_logger(__name__)
A_ : str = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
A_ : str = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
A_ : Tuple = {
"google/rembert": 256,
}
A_ : List[Any] = "▁"
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = RemBertTokenizer
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , **__SCREAMING_SNAKE_CASE , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Union[str, Any] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[int] = do_lower_case
snake_case__ : Tuple = remove_space
snake_case__ : List[Any] = keep_accents
snake_case__ : List[Any] = vocab_file
snake_case__ : str = False if not self.vocab_file else True
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : Optional[int] = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
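        # builds a special-tokens mask: 1 for [CLS]/[SEP] positions, 0 for regular sequence tokens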
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__SCREAMING_SNAKE_CASE ) )
return
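        # copy the sentencepiece model next to the saved tokenizer files, honoring any filename prefix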
snake_case__ : int = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
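        # each dc.update call returns (stepped, completed, reset) flags describing how the token advanced the constraint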
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 38 | 1 |
def __lowercase ( snake_case ):
"""simple docstring"""
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
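    # Sieve of Eratosthenes: primes[i] stays True until i is marked as a multiple of a smaller prime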
__magic_name__ :List[Any] = [True] * (num + 1)
__magic_name__ :Tuple = 2
while p * p <= num:
if primes[p]:
            for i in range(p * p, num + 1, p ):
__magic_name__ :List[str] = False
p += 1
return [prime for prime in range(2, num + 1 ) if primes[prime]]
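# example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7]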
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Tuple = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
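    # ONNX export metadata: dynamic axes for pixel_values, validation tolerance, and default opset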
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def _A ( ) -> Tuple:
"""simple docstring"""
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('-f' )
__UpperCamelCase = parser.parse_args()
return args.f
def _A ( _lowercase , _lowercase="eval" ) -> Dict:
"""simple docstring"""
__UpperCamelCase = os.path.join(_lowercase , f'''{split}_results.json''' )
if os.path.exists(_lowercase ):
with open(_lowercase , 'r' ) as f:
return json.load(_lowercase )
raise ValueError(f'''can\'t find {path}''' )
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCamelCase (_a ):
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(A_,'argv',A_ ):
run_flax_glue.main()
__UpperCamelCase = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'],0.7_5 )
@slow
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A_,'argv',A_ ):
run_clm_flax.main()
__UpperCamelCase = get_results(A_ )
self.assertLess(result['eval_perplexity'],100 )
@slow
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(A_,'argv',A_ ):
run_summarization_flax.main()
__UpperCamelCase = get_results(A_,split='test' )
self.assertGreaterEqual(result['test_rouge1'],10 )
self.assertGreaterEqual(result['test_rouge2'],2 )
self.assertGreaterEqual(result['test_rougeL'],7 )
self.assertGreaterEqual(result['test_rougeLsum'],7 )
@slow
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(A_,'argv',A_ ):
run_mlm_flax.main()
__UpperCamelCase = get_results(A_ )
self.assertLess(result['eval_perplexity'],42 )
@slow
def snake_case_ ( self: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(A_,'argv',A_ ):
run_ta_mlm_flax.main()
__UpperCamelCase = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'],0.4_2 )
@slow
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(A_,'argv',A_ ):
run_flax_ner.main()
__UpperCamelCase = get_results(A_ )
self.assertGreaterEqual(result['eval_accuracy'],0.7_5 )
self.assertGreaterEqual(result['eval_f1'],0.3 )
@slow
def snake_case_ ( self: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = F'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(A_,'argv',A_ ):
run_qa.main()
__UpperCamelCase = get_results(A_ )
self.assertGreaterEqual(result['eval_f1'],30 )
self.assertGreaterEqual(result['eval_exact'],30 )
| 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 | 0 |
import datasets
UpperCAmelCase_ = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
UpperCAmelCase_ = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
UpperCAmelCase_ = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def SCREAMING_SNAKE_CASE_ ( _snake_case :int , _snake_case :List[Any] ) -> List[str]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
"""simple docstring"""
def snake_case_ ( self : Union[str, Any] ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def snake_case_ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> Optional[int]:
return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
| 2 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True})
lowerCAmelCase_ = Features({"""text""": Value("""string""")})
lowerCAmelCase_ = Features({})
lowerCAmelCase_ = "text"
@property
def UpperCAmelCase_ ( self )-> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 3 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : List[Any] = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 38 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = ["""model.decoder.embed_positions.weights"""]
def A (__lowerCamelCase :Optional[int] ):
if "emb" in name:
_lowerCAmelCase = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
_lowerCAmelCase = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
_lowerCAmelCase = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
_lowerCAmelCase = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
_lowerCAmelCase = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
_lowerCAmelCase = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
_lowerCAmelCase = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
_lowerCAmelCase = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
_lowerCAmelCase = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
_lowerCAmelCase = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
_lowerCAmelCase = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def A (__lowerCamelCase :OrderedDict , __lowerCamelCase :int ):
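    # rename every checkpoint key and split out the enc-dec projection weights into their own dict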
_lowerCAmelCase = list(state_dict.keys() )
_lowerCAmelCase = {}
for key in keys:
_lowerCAmelCase = state_dict.pop(__lowerCamelCase )
_lowerCAmelCase = rename_keys(__lowerCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
_lowerCAmelCase = val[:hidden_size, :]
_lowerCAmelCase = val[hidden_size : 2 * hidden_size, :]
_lowerCAmelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_lowerCAmelCase = val
else:
_lowerCAmelCase = val
return state_dict, enc_dec_proj_state_dict
def A (__lowerCamelCase :str ):
if checkpoint == "small":
# default config values
_lowerCAmelCase = 1024
_lowerCAmelCase = 24
_lowerCAmelCase = 16
elif checkpoint == "medium":
_lowerCAmelCase = 1536
_lowerCAmelCase = 48
_lowerCAmelCase = 24
elif checkpoint == "large":
_lowerCAmelCase = 2048
_lowerCAmelCase = 48
_lowerCAmelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
_lowerCAmelCase = MusicgenDecoderConfig(
hidden_size=__lowerCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase , )
return config
@torch.no_grad()
def A (__lowerCamelCase :str , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :List[str]=None , __lowerCamelCase :Optional[int]="cpu" ):
_lowerCAmelCase = MusicGen.get_pretrained(__lowerCamelCase , device=__lowerCamelCase )
_lowerCAmelCase = decoder_config_from_checkpoint(__lowerCamelCase )
_lowerCAmelCase = fairseq_model.lm.state_dict()
_lowerCAmelCase , _lowerCAmelCase = rename_state_dict(
__lowerCamelCase , hidden_size=decoder_config.hidden_size )
_lowerCAmelCase = TaEncoderModel.from_pretrained("""t5-base""" )
_lowerCAmelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
_lowerCAmelCase = MusicgenForCausalLM(__lowerCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_lowerCAmelCase , _lowerCAmelCase = decoder.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__lowerCamelCase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
_lowerCAmelCase = MusicgenForConditionalGeneration(text_encoder=__lowerCamelCase , audio_encoder=__lowerCamelCase , decoder=__lowerCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCamelCase )
# check we can do a forward pass
_lowerCAmelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_lowerCAmelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_lowerCAmelCase = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
_lowerCAmelCase = AutoTokenizer.from_pretrained("""t5-base""" )
_lowerCAmelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
_lowerCAmelCase = MusicgenProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
# set the appropriate bos/pad token ids
_lowerCAmelCase = 2048
_lowerCAmelCase = 2048
# set other default generation config params
_lowerCAmelCase = int(30 * audio_encoder.config.frame_rate )
_lowerCAmelCase = True
_lowerCAmelCase = 3.0
if pytorch_dump_folder is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__lowerCamelCase )
processor.push_to_hub(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
_lowercase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 5 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( __magic_name__ : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(__magic_name__ ) / len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "OwlViTImageProcessor"
lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
SCREAMING_SNAKE_CASE__ = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = input_ids
SCREAMING_SNAKE_CASE__ = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
SCREAMING_SNAKE_CASE__ = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
    def post_process(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward all arguments to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
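

# Editor's usage sketch (comment-only; assumes the public OwlViT checkpoint,
# which is not referenced in the original file):
#
#   import requests
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   # nested text queries are padded to the longest per-image query list
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # -> keys: "input_ids", "attention_mask", "pixel_values"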
"""Get the site emails from URL."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open each discovered URL (the original fetched the start URL
            # again here, which was a bug)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
"""Flow network with a pluggable maximum-flow algorithm (push-relabel provided)."""


class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        # the original tested `sources is int`, which is always False
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
"""Check whether a singly linked list reads the same forwards and backwards,
implemented three ways: in-place reversal, a stack, and a position dictionary."""


def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
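

# Editor's demo: the functions above assume a node type with `val` and `next`
# attributes, which the original snippet never defines; `ListNode` below is
# one such assumption, added so the file can actually be exercised.
if __name__ == "__main__":

    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    def build(values):
        """Build a linked list from a Python list, returning its head."""
        head = None
        for v in reversed(values):
            node = ListNode(v)
            node.next = head
            head = node
        return head

    print(is_palindrome(build([1, 2, 2, 1])))      # True
    print(is_palindrome_stack(build([1, 2, 3])))   # False
    print(is_palindrome_dict(build([1, 2, 1])))    # True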
"""Decorator that runs Accelerate's pre-forward hook before a method, when available."""
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
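

# Editor's usage sketch (comment-only; `MyModule.encode` is illustrative, not
# part of the original file):
#
#   class MyModule(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return self.encoder(x)
#
# When the module has been dispatched with `accelerate` (so `_hf_hook` is set),
# the hook moves weights into place before `encode` runs; otherwise the call
# passes through unchanged.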
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #          ^ unk: 2 + 1 = 3             unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
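

# Editor's note (assumed invocation; the test file's path is a guess based on
# the usual transformers layout):
#   python -m pytest tests/models/mbart/test_tokenization_mbart.py -k enro
# The integration class downloads the facebook/mbart-large-en-ro checkpoint.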
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown
    one as 0), solve for the missing quantity via Z^2 = R^2 + X^2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
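
    # Editor's example (assumed values, a 3-4-5 triangle): with R = 3 and
    # X = 4 the impedance is sqrt(3**2 + 4**2) = 5.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}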
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
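

# Editor's sketch (comment-only): constructing the config as a backbone, with
# values borrowed from the hybrid-ViT conversion script later in this dump.
#
#   config = BitConfig(global_padding="same", layer_type="bottleneck",
#                      depths=(3, 4, 9), out_features=["stage3"],
#                      embedding_dynamic_padding=True)
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3']
#   config.out_features  # ['stage3']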
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Pure recursive 0/1 knapsack: best value using items from `index`
    onward with `max_weight` capacity remaining."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # option 1: skip the current item
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # option 2: take the current item, if it fits
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
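
    # Editor's example (assumed data): items (weight, value) = (1, 3), (2, 4),
    # (3, 5) with capacity 5 -> take the last two items for value 9.
    print(knapsack([1, 2, 3], [3, 4, 5], 3, 5, 0))  # 9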
"""Convert ViT hybrid checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm hybrid-ViT model's weights to our ViT structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
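
# Editor's note (assumed invocation; the script filename is a guess):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid
# timm must be able to download the named pretrained checkpoint.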
"""Spiral primes (cf. Project Euler 58): find the side length of the first odd
square spiral whose diagonal prime ratio falls below a given threshold."""
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        # the three non-square corners of the layer with side length j + 2
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
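
    # Editor's example: with the default 10% ratio this prints the side length
    # of the first spiral layer whose diagonal prime ratio dips below it.
    print(f"{solution() = }")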
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of the decoding method, holding the decoded `sample`."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = self.conv_in(x)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = self.conv_in(z)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Discretization bottleneck of the VQ-VAE, with optional index remapping."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
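

# Editor's sketch (comment-only, since this module uses relative imports):
# round-tripping a random image through the pieces above; shapes are assumed.
#
#   enc = Encoder(in_channels=3, out_channels=4, double_z=True)   # 2*4 = 8 output channels
#   dec = Decoder(in_channels=4, out_channels=3)
#   x = torch.randn(1, 3, 64, 64)
#   moments = enc(x)                                   # (1, 8, 64, 64)
#   posterior = DiagonalGaussianDistribution(moments)  # splits into mean / logvar
#   recon = dec(posterior.sample())                    # (1, 3, 64, 64)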
"""Prize strings (Project Euler 191): count attendance strings with no three
consecutive late days and at most one absence."""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # three consecutive late days, or a second absence, disqualifies the string
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
""" Testing suite for the PyTorch DPT model. """
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 0 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> int:
__lowerCamelCase : str = start
__lowerCamelCase : Union[str, Any] = end
__lowerCamelCase : Any = val
__lowerCamelCase : int = (start + end) // 2
__lowerCamelCase : str = left
__lowerCamelCase : Tuple = right
def __repr__( self ) -> List[str]:
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : Optional[Any] = collection
__lowerCamelCase : Tuple = function
if self.collection:
__lowerCamelCase : List[str] = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
__lowerCamelCase : Optional[int] = (start + end) // 2
__lowerCamelCase : str = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
if node.start == i and node.end == i:
__lowerCamelCase : Any = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.fn(node.left.val , node.right.val )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Dict:
if self.root is not None:
__lowerCamelCase : Any = Queue()
queue.put(self.root )
while not queue.empty():
__lowerCamelCase : List[str] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
A__ : List[Any] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
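# Worked check of the demo above (the demo's `SegmentTree` name corresponds to
# the class obfuscated as `UpperCAmelCase_`): with fn=operator.add, the initial
# collection [2, 1, 5, 3, 4] would give query_range(1, 3) = 1 + 5 + 3 = 9;
# after update(1, 5) it behaves like [2, 5, 5, 3, 4], so query_range(3, 4) =
# 3 + 4 = 7, query_range(2, 2) = 5, and query_range(1, 3) = 5 + 5 + 3 = 13,
# matching the expected values in the trailing comments.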
| 13 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
    snake_case__ : int = boto3.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
    snake_case__ : List[str] = boto3.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
        image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , ec2_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
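# Context note: this prompt flow is what backs `accelerate config` when
# "Amazon SageMaker" is chosen as the compute environment; the answers are
# written into a SageMakerConfig that `accelerate launch` reads later.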
| 38 | 0 |
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase ( __a : np.ndarray ,__a : tuple[int, int] ,__a : tuple[int, int] ,__a : bool ,) -> tuple[float | int, list[tuple[int, int]]]:
"""simple docstring"""
_a , _a : Optional[Any] = grid.shape
_a : Dict = [-1, 1, 0, 0]
_a : str = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_a , _a : Dict = [(0, source)], set()
_a : Union[str, Any] = np.full((rows, cols) ,np.inf )
_a : Optional[Any] = 0
_a : Any = np.empty((rows, cols) ,dtype=__a )
_a : Tuple = None
while queue:
((_a) , (_a)) : List[Any] = heappop(__a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_a : Any = []
while (x, y) != source:
path.append((x, y) )
_a , _a : str = predecessors[x, y]
path.append(__a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__a ) ):
_a , _a : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_a : str = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__a ,(dist + 1, (nx, ny)) )
_a : Dict = dist + 1
_a : List[str] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
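# With unit edge weights, the Dijkstra variant above degenerates to plain BFS
# over passable cells (value 1). A minimal self-contained restatement with
# assumed, readable names:
from collections import deque

import numpy as np


def shortest_path_len(grid: np.ndarray, source: tuple, destination: tuple) -> float:
    rows, cols = grid.shape
    dist = {source: 0}  # with unit weights, the first visit is along a shortest path
    queue = deque([source])
    while queue:
        x, y = queue.popleft()
        if (x, y) == destination:
            return dist[(x, y)]
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1 and (nx, ny) not in dist:
                dist[(nx, ny)] = dist[(x, y)] + 1
                queue.append((nx, ny))
    return float("inf")


if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    print(shortest_path_len(demo_grid, (0, 0), (2, 2)))  # 4: right, right, down, down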
| 14 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            snake_case__ : Optional[int] = item.h2.text
            snake_case__ : Any = """https://www.amazon.in/""" + item.h2.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
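# Worked example of the discount formula above (illustrative numbers): an MRP
# of ₹1,000 and a price of ₹750 give (1000 - 750) / 1000 * 100 = 25.0% off.
assert (1_000 - 750) / 1_000 * 100 == 25.0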
| 38 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A : Union[str, Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = field(default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
A__ = field(
default=UpperCAmelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
A__ = field(
default=UpperCAmelCase__ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = super().to_dict()
for k, v in d.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = v.to_dict()
return d
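# Usage note (assumed upstream names, since the renaming above collapsed every
# field to `A__` and the class to `A`): this mirrors Hugging Face's
# Seq2SeqTrainingArguments, whose fields are sortish_sampler,
# predict_with_generate, generation_max_length, generation_num_beams, and
# generation_config, e.g.
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True,
#                                   generation_max_length=64, generation_num_beams=4)
#   args.to_dict()["generation_max_length"]  # 64; the override at the end also
#   serializes any nested GenerationConfig to a plain dict.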
| 15 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 38 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Any ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _snake_case ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__lowerCamelCase , image_processor=__lowerCamelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__lowerCamelCase , {
"score": ANY(__lowerCamelCase ),
"label": ANY(__lowerCamelCase ),
"box": {"xmin": ANY(__lowerCamelCase ), "ymin": ANY(__lowerCamelCase ), "xmax": ANY(__lowerCamelCase ), "ymax": ANY(__lowerCamelCase )},
} , )
import datasets
SCREAMING_SNAKE_CASE = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
SCREAMING_SNAKE_CASE = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
SCREAMING_SNAKE_CASE = object_detector(__lowerCamelCase , threshold=0.0 )
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(__lowerCamelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
__lowerCamelCase , {
"score": ANY(__lowerCamelCase ),
"label": ANY(__lowerCamelCase ),
"box": {"xmin": ANY(__lowerCamelCase ), "ymin": ANY(__lowerCamelCase ), "xmax": ANY(__lowerCamelCase ), "ymax": ANY(__lowerCamelCase )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def _snake_case ( self : int ):
pass
@require_torch
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-detr-mobilenetsv3"
SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
SCREAMING_SNAKE_CASE = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.3_376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE = pipeline("object-detection" , model=__lowerCamelCase )
SCREAMING_SNAKE_CASE = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
SCREAMING_SNAKE_CASE = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.9_982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.9_960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.9_955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = 0.9_985
SCREAMING_SNAKE_CASE = "facebook/detr-resnet-50"
SCREAMING_SNAKE_CASE = pipeline("object-detection" , model=__lowerCamelCase )
SCREAMING_SNAKE_CASE = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__lowerCamelCase )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.9_987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = "Narsil/layoutlmv3-finetuned-funsd"
SCREAMING_SNAKE_CASE = 0.9_993
SCREAMING_SNAKE_CASE = pipeline("object-detection" , model=__lowerCamelCase , threshold=__lowerCamelCase )
SCREAMING_SNAKE_CASE = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.9_993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , ) | 16 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
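# Note on the stage alignment above (based on transformers' backbone utilities,
# stated here as an assumption, not verified against this snippet): with
# out_features=None and out_indices=None, get_aligned_output_features_output_indices
# falls back to the last stage, so a default config exposes
# out_features == ["stage4"] and out_indices == [4] for the stem + 4-stage
# layout derived from `depths`.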
| 38 | 0 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Dict = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : Tuple = '''mask2former'''
_lowercase : Optional[Any] = ['''swin''']
_lowercase : Dict = {'''hidden_size''': '''hidden_dim'''}
def __init__( self : Optional[Any] , __A : Optional[Dict] = None , __A : int = 256 , __A : int = 256 , __A : int = 256 , __A : int = 1024 , __A : str = "relu" , __A : int = 6 , __A : int = 10 , __A : int = 8 , __A : float = 0.0 , __A : int = 2048 , __A : bool = False , __A : bool = False , __A : int = 4 , __A : int = 255 , __A : int = 100 , __A : float = 0.1 , __A : float = 2.0 , __A : float = 5.0 , __A : float = 5.0 , __A : int = 1_2544 , __A : float = 3.0 , __A : float = 0.7_5 , __A : float = 0.0_2 , __A : float = 1.0 , __A : bool = True , __A : List[int] = [4, 8, 16, 32] , __A : bool = None , **__A : Dict , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
__A : List[Any] = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__A , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__A , __A ):
__A : Any = backbone_config.pop("""model_type""" )
__A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__A : str = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
__A : int = backbone_config
__A : Optional[Any] = feature_size
__A : Union[str, Any] = mask_feature_size
__A : List[str] = hidden_dim
__A : Union[str, Any] = encoder_feedforward_dim
__A : int = activation_function
__A : Any = encoder_layers
__A : str = decoder_layers
__A : List[str] = num_attention_heads
__A : Tuple = dropout
__A : Tuple = dim_feedforward
__A : Optional[int] = pre_norm
__A : Optional[Any] = enforce_input_projection
__A : Any = common_stride
__A : Any = ignore_value
__A : List[str] = num_queries
__A : List[str] = no_object_weight
__A : str = class_weight
__A : Any = mask_weight
__A : Dict = dice_weight
__A : Optional[Any] = train_num_points
__A : str = oversample_ratio
__A : str = importance_sample_ratio
__A : List[str] = init_std
__A : Any = init_xavier_std
__A : Any = use_auxiliary_loss
__A : Optional[int] = feature_strides
__A : List[str] = output_auxiliary_logits
__A : List[str] = decoder_layers
super().__init__(**__A )
@classmethod
def lowerCAmelCase_ ( cls : str , __A : PretrainedConfig , **__A : int ):
return cls(
backbone_config=__A , **__A , )
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Optional[int] = copy.deepcopy(self.__dict__ )
__A : Any = self.backbone_config.to_dict()
__A : Optional[int] = self.__class__.model_type
return output
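# Illustrative sketch (not part of the original file): using the public
# transformers API that this config mirrors. Assumes a recent `transformers`
# install where Mask2FormerConfig and SwinConfig are available.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = Mask2FormerConfig.from_backbone_config(backbone)
assert config.to_dict()["backbone_config"]["model_type"] == "swin"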
| 17 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
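# Illustrative sketch (not in the original file): the replacement import path,
# taken directly from the deprecation message above.
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput  # noqa: F401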
| 38 | 0 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
__lowerCamelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__lowerCamelCase : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ,)
__lowerCamelCase : int = field(
default=1_024 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__lowerCamelCase : bool = field(
default=__magic_name__ ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__lowerCamelCase : bool = field(
default=__magic_name__ ,metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} ,)
__lowerCamelCase : Optional[int] = field(
default=__magic_name__ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
__lowerCamelCase : Optional[int] = field(
default=__magic_name__ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
__lowerCamelCase : Optional[int] = field(
default=__magic_name__ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} ,)
__lowerCamelCase : Optional[str] = field(
default=__magic_name__ ,metadata={"help": "A csv or a json file containing the training data."} )
__lowerCamelCase : Optional[str] = field(
default=__magic_name__ ,metadata={"help": "A csv or a json file containing the validation data."} )
__lowerCamelCase : Optional[str] = field(default=__magic_name__ ,metadata={"help": "A csv or a json file containing the test data."} )
def _snake_case ( self ) -> Optional[Any]:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
_lowerCAmelCase = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowerCAmelCase = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments :
__lowerCamelCase : str = field(
default=__magic_name__ ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCamelCase : Optional[str] = field(
default=__magic_name__ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCamelCase : Optional[str] = field(
default=__magic_name__ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCamelCase : Optional[str] = field(
default=__magic_name__ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
__lowerCamelCase : bool = field(
default=__magic_name__ ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
__lowerCamelCase : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
__lowerCamelCase : bool = field(
default=__magic_name__ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowerCAmelCase = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split("." )[-1]
                test_extension = data_args.test_file.split("." )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_lowerCAmelCase = load_dataset("csv" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowerCAmelCase = load_dataset("json" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowerCAmelCase = raw_datasets["train"].features["label"].names
_lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowerCAmelCase = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowerCAmelCase = {"Refused": 0, "Entailed": 1}
_lowerCAmelCase = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
_lowerCAmelCase = examples["statement"]
_lowerCAmelCase = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
_lowerCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_lowerCAmelCase = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowerCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_lowerCAmelCase = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowerCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_lowerCAmelCase = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_lowerCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float64 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
if training_args.do_predict:
logger.info("*** Predict ***" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label" )
        predictions = trainer.predict(predict_dataset , metric_key_prefix="predict" ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                logger.info("***** Predict Results *****" )
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowerCAmelCase = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
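# Illustrative sketch (not part of the original script): the '#'-delimited
# table parsing performed by `_convert_table_text_to_pandas` above, run on an
# invented sample table.
import pandas as pd

table_text = "col1#col2\na#1\nb#2\n"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)  # two rows, columns col1/col2, ready to hand to TapexTokenizer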
| 18 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback( TrainerCallback ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def __UpperCamelCase ( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def __UpperCamelCase ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="""ignore""" , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
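# Illustrative sketch (not part of the test file): a minimal custom callback
# in the style of the event recorder above, using the public TrainerCallback
# hooks. Assumes `transformers` is installed.
from transformers import TrainerCallback

class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` carries the metrics dict emitted at each logging step
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# it would be passed to a Trainer via: Trainer(..., callbacks=[LossPrinterCallback])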
| 38 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mask2former"""] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 19 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
def __UpperCamelCase ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
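# Illustrative sketch (not part of the test file): the processor behaviour
# exercised above, resizing and normalizing one image into a
# (batch, channels, height, width) tensor. Requires Pillow, numpy, torch,
# and transformers.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.uint8(np.random.rand(30, 30, 3) * 255))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])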
| 38 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
    def prepare_config_and_inputs( self) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
return config, pixel_values
    def get_config( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
        model = TimmBackbone(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        with torch.no_grad():
            result = model(lowercase_)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest (ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def __UpperCamelCase ( self) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict , model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
        modified_config = copy.deepcopy(config)
        modified_config.out_indices = None
        model = model_class(modified_config)
        model.to(torch_device)
        model.eval()
        result = model(**inputs_dict)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
        modified_config = copy.deepcopy(config)
        modified_config.use_pretrained_backbone = False
        model = model_class(modified_config)
        model.to(torch_device)
        model.eval()
        result = model(**inputs_dict)
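# Illustrative sketch (not part of the test file): the timm vs. transformers
# out_indices behaviour checked above. Downloads weights, so treat it as
# illustrative only.
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
print(backbone.out_indices)  # (1, 2, 3)
print(backbone.channels)     # per-stage channel counts for the selected stages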
| 20 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester ( ConfigTester ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class TFCvtModelTester :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
        policy = tf.keras.mixed_precision.Policy("""mixed_float16""" )
        tf.keras.mixed_precision.set_global_policy(policy )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __UpperCamelCase ( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __UpperCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> str:
'''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
# verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 38 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
UpperCAmelCase_ : List[str] = 500000
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = os.path.split(__file__)
UpperCAmelCase_ : Any = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def lowerCAmelCase_ ( lowerCamelCase , **lowerCamelCase ):
__magic_name__ : Tuple =dataset.map(**lowerCamelCase )
@get_duration
def lowerCAmelCase_ ( lowerCamelCase , **lowerCamelCase ):
__magic_name__ : List[str] =dataset.filter(**lowerCamelCase )
def lowerCAmelCase_ ( ):
__magic_name__ : Optional[int] ={"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : str =datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
__magic_name__ : str =generate_example_dataset(
os.path.join(lowerCamelCase , """dataset.arrow""" ) , lowerCamelCase , num_examples=lowerCamelCase )
__magic_name__ : Dict =transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
__magic_name__ : Union[str, Any] =map(lowerCamelCase )
__magic_name__ : List[Any] =map(lowerCamelCase , batched=lowerCamelCase )
__magic_name__ : Optional[int] =map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
__magic_name__ : Optional[Any] =map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
__magic_name__ : int =map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
__magic_name__ : List[Any] =map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
__magic_name__ : Optional[Any] =map(lowerCamelCase , function=lambda lowerCamelCase : None , batched=lowerCamelCase )
__magic_name__ : Optional[Any] =map(lowerCamelCase , function=lowerCamelCase , batched=lowerCamelCase )
__magic_name__ : Optional[int] =filter(lowerCamelCase )
    # Activate later when tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase , """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
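# --- Illustrative sketch (added): a plausible `get_duration` decorator ---
# `get_duration` above comes from a local `utils` module that is not shown.
# A minimal implementation consistent with how it is used (decorating a
# benchmark function and returning its wall-clock duration) could be:
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, recorded per benchmark
    return wrapper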
| 21 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
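# --- Illustrative sketch (added): driving a constraint to completion ---
# Based only on the API exercised above (update() returning
# (stepped, completed, reset), plus reset()), this helper feeds a candidate
# sequence to a constraint and reports whether it was fulfilled:
def fulfills(constraint, token_ids):
    constraint.reset()
    for token_id in token_ids:
        stepped, completed, reset = constraint.update(token_id)
        if completed:
            return True
    return False

# e.g. fulfills(DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]]), [1, 2, 4]) -> True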
| 38 | 0 |
'''simple docstring'''
def depth_first_search (grid : list[list[int]] , row : int , col : int , visit : set ) -> int:
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
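# --- Illustrative usage (added): counting paths on a small open grid ---
# On a wall-free 2x2 grid there are exactly two simple paths from the
# top-left to the bottom-right cell (right-then-down and down-then-right):
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2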
| 22 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
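# --- Illustrative usage (added): inspecting the ONNX export description ---
# The second class above declares a single 4-D image input with dynamic
# batch/channel/spatial axes, a validation tolerance of 1e-4, and opset 12.
# Assuming the original (pre-obfuscation) names SegformerConfig and
# SegformerOnnxConfig, it would be exercised roughly as:
#   onnx_config = SegformerOnnxConfig(SegformerConfig())
#   print(onnx_config.inputs)                 # OrderedDict with "pixel_values"
#   print(onnx_config.atol_for_validation)    # 1e-4
#   print(onnx_config.default_onnx_opset)     # 12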
| 38 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
snake_case__ : Tuple = """1"""
snake_case__ : Optional[int] = """0"""
snake_case__ : Any = """1"""
snake_case__ : List[str] = ort.SessionOptions()
snake_case__ : List[str] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
snake_case__ : List[Any] = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
snake_case__ : str = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
snake_case__ : List[str] = ort.RunOptions()
snake_case__ : Optional[int] = 1_2_8
snake_case__ : Optional[int] = 1
snake_case__ : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ : int = np.ones((batch, sequence), dtype=np.intaa)
snake_case__ : Tuple = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
snake_case__ : str = time.time()
snake_case__ : Tuple = 2_0_0_0
snake_case__ : Dict = {}
for iter in range(max_iters):
snake_case__ : Tuple = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_0_0_0 / max_iters))
| 23 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
    snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
    snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
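# --- Illustrative sketch (added): the Markdown-table pattern ---
# make_github_table above assembles a GitHub-flavoured Markdown table line by
# line from the reduced error counts. The same idea on invented toy data:
def render_error_table(reduced):
    lines = ["| no. | error | status |", "|-:|:-|:-|"]
    for error, info in reduced.items():
        lines.append(f"| {info['count']} | {error[:100]} | |")
    return "\n".join(lines)

print(render_error_table({"AssertionError: logits mismatch": {"count": 3}}))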
| 38 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ : int = '''docs/source/en/_toctree.yml'''
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
__snake_case = defaultdict(_lowerCamelCase )
__snake_case = []
__snake_case = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(_lowerCamelCase )
__snake_case = new_doc_list
__snake_case = [key for key, value in counts.items() if value > 1]
__snake_case = []
for duplicate_key in duplicates:
__snake_case = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
            f'''{duplicate_key} is present several times in the documentation table of contents at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
__snake_case = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def _UpperCamelCase (_lowerCamelCase : Tuple=False )-> Any:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the model doc
__snake_case = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__snake_case = api_doc[scheduler_idx]['''sections''']
__snake_case = clean_doc_toc(_lowerCamelCase )
__snake_case = False
if new_scheduler_doc != scheduler_doc:
__snake_case = True
if overwrite:
__snake_case = new_scheduler_doc
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
            '''The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
def _UpperCamelCase (_lowerCamelCase : int=False )-> Optional[int]:
'''simple docstring'''
with open(_lowerCamelCase , encoding='''utf-8''' ) as f:
__snake_case = yaml.safe_load(f.read() )
# Get to the API doc
__snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case = content[api_idx]['''sections''']
# Then to the model doc
__snake_case = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__snake_case = False
__snake_case = api_doc[pipeline_idx]['''sections''']
__snake_case = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__snake_case = pipeline_doc['''section''']
__snake_case = clean_doc_toc(_lowerCamelCase )
if overwrite:
__snake_case = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
__snake_case = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
__snake_case = True
if overwrite:
__snake_case = new_pipeline_docs
if diff:
if overwrite:
__snake_case = api_doc
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
            '''The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCAmelCase_ : Dict = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
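# --- Illustrative sketch (added): the dedupe-and-sort idea behind clean_doc_toc ---
# A toy reimplementation of the core behaviour: collapse duplicate `local`
# keys, sort the remainder by title, and keep any "Overview" entry first.
# The input below is invented for illustration:
def tidy_toc(doc_list):
    overview = [d for d in doc_list if d["title"].lower() == "overview"]
    rest = {d["local"]: d for d in doc_list if d["title"].lower() != "overview"}
    return overview + sorted(rest.values(), key=lambda d: d["title"].lower())

print(tidy_toc([
    {"local": "zeta", "title": "Zeta"},
    {"local": "overview", "title": "Overview"},
    {"local": "alpha", "title": "Alpha"},
]))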
| 24 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
        # return full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
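# --- Illustrative sketch (added): mapping a URL to a dummy-data filename ---
# The manager above repeatedly reduces a remote URL to its last path
# component, percent-encoded with quote_plus, to locate the matching file
# inside the dummy_data zip. That mapping in isolation (example URL invented):
import os
import urllib.parse
from pathlib import Path

def dummy_path_for(root, url):
    return os.path.join(root, urllib.parse.quote_plus(Path(url).name))

print(dummy_path_for("dummy_data", "https://example.com/data/train.csv?rev=1"))
# -> dummy_data/train.csv%3Frev%3D1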
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
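# --- Illustrative sketch (added): device-aware seeded generators ---
# The inputs-builder method above branches because a torch.Generator pinned
# to "mps" cannot be seeded the way CUDA/CPU generators are; the pattern on
# its own (assumes torch is installed, as imported above):
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # fall back to the global generator
    return torch.Generator(device=device).manual_seed(seed)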
| 38 | 0 |
'''simple docstring'''
from ... import PretrainedConfig
__UpperCamelCase = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class _A ( __lowercase ):
lowercase__: List[Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowercase__: Optional[Any] = '''nezha'''
def __init__( self : List[Any] , __magic_name__ : str=2_11_28 , __magic_name__ : Tuple=7_68 , __magic_name__ : int=12 , __magic_name__ : List[str]=12 , __magic_name__ : List[str]=30_72 , __magic_name__ : Tuple="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Dict=0.1 , __magic_name__ : int=5_12 , __magic_name__ : List[str]=64 , __magic_name__ : Optional[Any]=2 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=1E-12 , __magic_name__ : Any=0.1 , __magic_name__ : str=0 , __magic_name__ : List[Any]=2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=True , **__magic_name__ : List[str] , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
__snake_case : Tuple = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : int = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : str = max_relative_position
__snake_case : Optional[int] = type_vocab_size
__snake_case : Any = initializer_range
__snake_case : Optional[int] = layer_norm_eps
__snake_case : Dict = classifier_dropout
__snake_case : List[str] = use_cache
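# --- Illustrative usage (added): instantiating the configuration ---
# Under its original (pre-obfuscation) name, presumably NezhaConfig given the
# "nezha" model_type above, the class is used like any other HF config:
#   config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#   config.save_pretrained("./nezha-config")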
| 26 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
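# --- Illustrative sketch (added): the ask-until-convertible loop ---
# The first prompt helper above re-asks until `convert_value` succeeds. The
# same control flow, made testable by injecting a reader in place of input():
def ask(prompt, convert_value=None, default=None, error_message=None, reader=input):
    while True:
        result = reader(prompt)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)

answers = iter(["oops", "3"])
print(ask("num processes? ", convert_value=int, error_message="enter an integer",
          reader=lambda _: next(answers)))  # prints the error once, then 3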
| 38 | 0 |
from math import ceil, sqrt
def solution( limit = 1_000_000 ) -> int:
    """simple docstring"""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"{solution() = }")
| 27 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( nums : list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
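# --- Illustrative usage (added) ---
# The function above returns sum(nums) / len(nums) and raises ValueError on
# an empty list, e.g. (under a descriptive name such as `mean`):
#   mean([3, 6, 9])  # -> 6.0
#   mean([])         # -> ValueError: List is empty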
| 38 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Any = ReformerTokenizer
A : Any = ReformerTokenizerFast
A : Any = True
A : Tuple = False
A : Optional[Any] = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = ReformerTokenizer(A, keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '<s>'
SCREAMING_SNAKE_CASE : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<unk>' )
self.assertEqual(vocab_keys[1], '<s>' )
self.assertEqual(vocab_keys[-1], 'j' )
self.assertEqual(len(A ), 1_000 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(A )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.tokenize(A )
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(A, add_special_tokens=A )
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.encode(A )
self.assertListEqual(A, A )
def UpperCamelCase_ ( self, A=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(A, **A )
# Simple input
SCREAMING_SNAKE_CASE : Tuple = 'This is a simple input'
SCREAMING_SNAKE_CASE : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
SCREAMING_SNAKE_CASE : int = ('This is a simple input', 'This is a pair')
SCREAMING_SNAKE_CASE : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(A, tokenizer_r.encode, A, max_length=A, padding='max_length' )
# Simple input
self.assertRaises(A, tokenizer_r.encode_plus, A, max_length=A, padding='max_length' )
# Simple input
self.assertRaises(
A, tokenizer_r.batch_encode_plus, A, max_length=A, padding='max_length', )
# Pair input
self.assertRaises(A, tokenizer_r.encode, A, max_length=A, padding='max_length' )
# Pair input
self.assertRaises(A, tokenizer_r.encode_plus, A, max_length=A, padding='max_length' )
# Pair input
self.assertRaises(
A, tokenizer_r.batch_encode_plus, A, max_length=A, padding='max_length', )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ReformerTokenizer(A, keep_accents=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ), [285, 46, 10, 170, 382], )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 'Hello World!'
SCREAMING_SNAKE_CASE : Optional[int] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(A, self.big_tokenizer.encode(A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE : List[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(A, self.big_tokenizer.encode(A ) )
@require_torch
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
SCREAMING_SNAKE_CASE : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE : Union[str, Any] = ' '.join(A )
SCREAMING_SNAKE_CASE : Dict = self.big_tokenizer.encode_plus(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt' )
SCREAMING_SNAKE_CASE : str = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
SCREAMING_SNAKE_CASE : List[Any] = encoded_sequence['input_ids'].shape
SCREAMING_SNAKE_CASE : List[str] = ReformerModel(A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A )
model(**A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
SCREAMING_SNAKE_CASE : Dict = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=A, model_name='google/reformer-crime-and-punishment', revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a', padding=A, sequences=A, )
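# --- Illustrative note (added): the "▁" (SPIECE_UNDERLINE) marker ---
# SentencePiece prefixes word-initial pieces with U+2581, which is why the
# expected tokens above look like "▁This". Detokenising is just:
pieces = ["▁This", "▁is", "▁a", "▁t", "est"]
print("".join(pieces).replace("\u2581", " ").strip())  # -> This is a test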
| 28 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined, and is neither empty nor just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
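

# Hedged sanity check (illustration only, not part of the original script): the entropy of a
# uniform distribution over k outcomes is log(k).
#   >>> probs = torch.full((1, 4), 0.25)
#   >>> entropy(probs)                            # tensor([1.3863]) == log(4)
#   >>> entropy(torch.sqrt(probs), unlogit=True)  # same value; unlogit squares p first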
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores (see http://arxiv.org/abs/1905.10650)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold
    (see Michel et al., http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and measure the score/speed impact
    (see Michel et al., http://arxiv.org/abs/1905.10650)."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()

    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.")
    # Other parameters
    parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory")
    parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers")
    parser.add_argument("--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument("--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument("--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument("--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, sequences shorter padded.")
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
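# Hedged example invocation (script/file names hypothetical; `--data_dir` expects a text file
# of token ids loadable by np.loadtxt, as `main` above assumes):
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir data/token_ids.txt --output_dir out/ \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1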
'''Check whether a singly linked list reads the same forwards and backwards.'''


def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
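

# The functions above assume a singly linked node type exposing `val` and `next`.
# Minimal hedged sketch of such a node plus a tiny smoke test (helper names hypothetical):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_linked_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


if __name__ == "__main__":
    assert is_palindrome_stack(build_linked_list([1, 2, 2, 1]))
    assert is_palindrome_dict(build_linked_list([1, 2, 1]))
    assert not is_palindrome(build_linked_list([1, 2, 3]))  # note: this variant splits its input list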
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
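

# Hedged shape sketch (comment-only; channel/resolution sizes hypothetical, tensors are NHWC
# as in the Flax ports of diffusers):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   params = block.init(
#       jax.random.PRNGKey(0),
#       jnp.zeros((1, 16, 16, 32)),  # hidden_states
#       jnp.zeros((1, 512)),         # timestep embedding
#   )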
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                                        unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
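

# Hedged usage sketch of the tokenizer exercised above (the `model` variable is hypothetical):
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tok.lang_code_to_id["ro_RO"])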
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    '''Output of a decoding method: the decoded sample of shape (batch, channels, height, width).'''

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default', attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
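

# Hedged shape sketch: with the defaults above, the single down block is the final one (so no
# downsampling happens) and `double_z=True` doubles the output channels so they can be split
# into a mean and a logvar half:
#   enc = Encoder(in_channels=3, out_channels=4, double_z=True)
#   enc(torch.randn(1, 3, 64, 64)).shape  # torch.Size([1, 8, 64, 64])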
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default' if norm_type == 'group' else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
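

# Hedged codebook-lookup sketch (sizes hypothetical):
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25)
#   z_q, loss, (_, _, indices) = vq(torch.randn(1, 4, 8, 8))
#   # z_q keeps the input shape; the straight-through estimator passes gradients to the encoder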
'''BiT model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
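

# Hedged usage sketch:
#   config = BitConfig(out_features=["stage1", "stage4"])
#   config.out_features, config.out_indices  # (['stage1', 'stage4'], [1, 4])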
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
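# Illustrative sketch (not part of the original script): the base-model branch
# above strips the leading "vit." prefix (4 characters) from the HF-side key of
# each rename pair, e.g.:
#
#   pairs = [("norm.weight", "vit.layernorm.weight"), ("cls_token", "vit.embeddings.cls_token")]
#   pairs = [(src, dst[4:]) if dst.startswith("vit") else (src, dst) for src, dst in pairs]
#   # -> [("norm.weight", "layernorm.weight"), ("cls_token", "embeddings.cls_token")]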
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
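# Minimal runnable sketch (illustrative, with a made-up hidden size H): timm
# stores the attention projection as one fused qkv matrix of shape (3 * H, H);
# the loop above slices it into separate query / key / value tensors, as in:
#
#   import torch
#   H = 4
#   qkv_weight = torch.randn(3 * H, H)
#   q, k, v = qkv_weight[:H], qkv_weight[H : 2 * H], qkv_weight[-H:]
#   assert q.shape == k.shape == v.shape == (H, H)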
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers in the skip list above are not supported by this pipeline
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Convad(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
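# Sketch of the checkpointing pattern used above (an illustration, not part of
# this module): torch.utils.checkpoint recomputes each block's activations
# during the backward pass instead of storing them, trading compute for memory:
#
#   import torch
#   def run_checkpointed(block, hidden):
#       # the closure mirrors create_custom_forward above
#       return torch.utils.checkpoint.checkpoint(lambda x: block(x), hidden)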
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Convad(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Convad(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
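    # Illustrative note on the "preserve gradients" line above: it is the
    # straight-through estimator. In the forward pass z + (z_q - z).detach()
    # equals z_q, while in the backward pass the detached term contributes no
    # gradient, so d(output)/dz == 1 and gradients bypass the non-differentiable
    # argmin codebook lookup.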
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
        # make sure the sample is on the same device as the parameters and has the same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
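    # Illustrative note: the sample above uses the reparameterization trick,
    # x = mean + std * eps with eps ~ N(0, I), so the draw stays differentiable
    # with respect to mean and std.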
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
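    # Worked form (illustrative): for diagonal Gaussians the branch above computes
    #   KL(N(mu1, s1^2) || N(mu2, s2^2))
    #     = 0.5 * sum( (mu1 - mu2)^2 / s2^2 + s1^2 / s2^2 - 1 - log s1^2 + log s2^2 )
    # over dims [1, 2, 3]; with other=None it reduces to the KL against a standard
    # normal, 0.5 * sum( mu^2 + s^2 - 1 - log s^2 ).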
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
| 38 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = LEDTokenizer
A_ = LEDTokenizerFast
A_ = True
def UpperCAmelCase__ ( self) -> int:
super().setUp()
UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCamelCase = {'''unk_token''': '''<unk>'''}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(lowerCamelCase_) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(lowerCamelCase_))
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Union[str, Any]:
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Optional[int]:
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase__ ( self) -> Any:
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')
@cached_property
def UpperCAmelCase__ ( self) -> Any:
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')
@require_torch
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , max_length=len(lowerCamelCase_) , padding=lowerCamelCase_ , return_tensors='''pt''')
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
@require_torch
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''')
self.assertIn('''input_ids''' , lowerCamelCase_)
self.assertIn('''attention_mask''' , lowerCamelCase_)
self.assertNotIn('''labels''' , lowerCamelCase_)
self.assertNotIn('''decoder_attention_mask''' , lowerCamelCase_)
@require_torch
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(text_target=lowerCamelCase_ , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''')
self.assertEqual(3_2 , targets['''input_ids'''].shape[1])
@require_torch
def UpperCAmelCase__ ( self) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , return_tensors='''pt''')
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2))
@require_torch
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = ['''A long paragraph for summarization.''']
UpperCamelCase = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = tokenizer(lowerCamelCase_ , return_tensors='''pt''')
UpperCamelCase = tokenizer(text_target=lowerCamelCase_ , return_tensors='''pt''')
UpperCamelCase = inputs['''input_ids''']
UpperCamelCase = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCAmelCase__ ( self) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase = ['''Summary of the text.''', '''Another summary.''']
UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_)
UpperCamelCase = [[0] * len(lowerCamelCase_) for x in encoded_output['''input_ids''']]
UpperCamelCase = tokenizer.pad(lowerCamelCase_)
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> Tuple:
pass
def UpperCAmelCase__ ( self) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = '''A, <mask> AllenNLP sentence.'''
UpperCamelCase = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_)
UpperCamelCase = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_)
self.assertEqual(sum(tokens_r['''token_type_ids''']) , sum(tokens_p['''token_type_ids''']))
self.assertEqual(
sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']) , sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']) , )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>''']) | 34 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=1_6 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1, 3_8_4, 2_4, 2_4] , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , ):
snake_case__ : str = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : str = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : str = backbone_out_indices
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = backbone_featmap_shape
snake_case__ : List[Any] = scope
snake_case__ : Optional[Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : Union[str, Any] = num_patches + 1
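        # e.g. with the defaults above: (32 // 16) ** 2 = 4 patches, so the
        # sequence length is 4 + 1 = 5 once the [CLS] token is prepended.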
def __UpperCamelCase ( self ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : str = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
snake_case__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__SCREAMING_SNAKE_CASE , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = DPTModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : Dict = DPTForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : str = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = DPTModelTester(self )
snake_case__ : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[str] = [*signature.parameters.keys()]
snake_case__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ):
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : Optional[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = False
snake_case__ : str = True
if model_class in get_values(__SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
snake_case__ : List[str] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(config=__SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
snake_case__ : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
snake_case__ : Optional[int] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
def __UpperCamelCase ( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
snake_case__ : List[str] = DPTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = """add"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = DPTForDepthEstimation(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> Dict:
'''simple docstring'''
snake_case__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
snake_case__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = outputs.predicted_depth
# verify the predicted depth
snake_case__ : Any = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 38 | 0 |
import sys
def a ( A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = len(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = [[0 for x in range(A__ )] for x in range(A__ )]
SCREAMING_SNAKE_CASE__ : str = [[0 for x in range(A__ )] for x in range(A__ )]
for chain_length in range(2 , A__ ):
for a in range(1 , n - chain_length + 1 ):
SCREAMING_SNAKE_CASE__ : List[str] = a + chain_length - 1
SCREAMING_SNAKE_CASE__ : Any = sys.maxsize
for c in range(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
SCREAMING_SNAKE_CASE__ : Optional[int] = cost
SCREAMING_SNAKE_CASE__ : int = c
return matrix, sol
def a ( A__ , A__ , A__ ) -> List[str]:
'''simple docstring'''
if i == j:
print('''A''' + str(A__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
        print_optimal_solution(A__ , A__ , optimal_solution[i][j] )
        print_optimal_solution(A__ , optimal_solution[i][j] + 1 , A__ )
print(''')''' , end=''' ''' )
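# Recurrence implemented above (illustrative summary): for a dimension array p,
# the minimum multiplication cost of the chain A_a ... A_b is
#   m[a][b] = min over a <= c < b of ( m[a][c] + m[c + 1][b] + p[a - 1] * p[c] * p[b] )
# For example, with p = [10, 20, 30] (A1 is 10x20, A2 is 20x30) the single split
# costs 10 * 20 * 30 = 6000 scalar multiplications.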
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
SCREAMING_SNAKE_CASE__ : List[str] = len(A__ )
    # Sizes of the matrices created from the array above will be
# 30*35 35*15 15*5 5*10 10*20 20*25
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = matrix_chain_order(A__ )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(A__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 35 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Dict:
'''simple docstring'''
    snake_case__ : int = boto3.client("""iam""" )
snake_case__ : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__magic_name__ , AssumeRolePolicyDocument=json.dumps(__magic_name__ , indent=2 ) )
snake_case__ : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__magic_name__ , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(__magic_name__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def UpperCamelCase__ ( __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
    snake_case__ : List[str] = boto3.client("""iam""" )
return iam_client.get_role(RoleName=__magic_name__ )["Role"]["Arn"]
def UpperCamelCase__ ( ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , __magic_name__ , )
snake_case__ : List[Any] = None
if credentials_configuration == 0:
snake_case__ : Dict = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
snake_case__ : List[str] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
snake_case__ : List[str] = _ask_field("""AWS Access Key ID: """ )
snake_case__ : int = aws_access_key_id
snake_case__ : Optional[Any] = _ask_field("""AWS Secret Access Key: """ )
snake_case__ : List[str] = aws_secret_access_key
snake_case__ : Tuple = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
snake_case__ : Optional[int] = aws_region
snake_case__ : int = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , __magic_name__ , )
if role_management == 0:
snake_case__ : Optional[Any] = _ask_field("""Enter your IAM role name: """ )
else:
snake_case__ : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(__magic_name__ )
snake_case__ : Dict = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Any = None
if is_custom_docker_image:
snake_case__ : str = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
snake_case__ : Tuple = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : List[Any] = None
if is_sagemaker_inputs_enabled:
snake_case__ : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Optional[int] = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Optional[Any] = None
if is_sagemaker_metrics_enabled:
snake_case__ : List[Any] = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
snake_case__ : Tuple = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
snake_case__ : Any = {}
snake_case__ : List[Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
snake_case__ : str = """dynamo_"""
snake_case__ : Tuple = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case__ : List[str] = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
snake_case__ : str = _ask_options(
"""Which mode do you want to use?""" , __magic_name__ , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
snake_case__ : Union[str, Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : str = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=__magic_name__ , error_message="""Please enter yes or no.""" , )
snake_case__ : Dict = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
snake_case__ : List[str] = _ask_options(
__magic_name__ , __magic_name__ , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
else:
        eca_instance_query += " [ml.p3.2xlarge]:"
snake_case__ : Optional[int] = _ask_field(__magic_name__ , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
snake_case__ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case__ : Optional[Any] = _ask_field(
"""How many machines do you want use? [1]: """ , __magic_name__ , default=1 , )
snake_case__ : Union[str, Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=__magic_name__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__magic_name__ , use_cpu=__magic_name__ , dynamo_config=__magic_name__ , eca_instance_type=__magic_name__ , profile=__magic_name__ , region=__magic_name__ , iam_role_name=__magic_name__ , mixed_precision=__magic_name__ , num_machines=__magic_name__ , sagemaker_inputs_file=__magic_name__ , sagemaker_metrics_file=__magic_name__ , )
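# Illustrative sketch (editorial, not from the original source): depending on the
# answers above, the returned object is roughly equivalent to
#   SageMakerConfig(
#       compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#       distributed_type=SageMakerDistributedType.DATA_PARALLEL,
#       eca_instance_type="ml.p3.2xlarge", num_machines=2, mixed_precision="fp16",
#       region="us-east-1", iam_role_name="accelerate_sagemaker_execution_role",
#   )
# with the remaining fields (profile, image_uri, dynamo_config, inputs/metrics files)
# filled from the corresponding prompts or left as None.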
| 38 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
class _A ( PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''timm_backbone'''
def __init__( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = backbone
snake_case : Dict = num_channels
snake_case : Optional[int] = features_only
snake_case : Any = use_pretrained_backbone
snake_case : Union[str, Any] = True
snake_case : Optional[int] = out_indices if out_indices is not None else (-1,)
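# Editorial note (hedged): upstream in transformers this class is known as
# TimmBackboneConfig. It records `backbone`, `num_channels`, `features_only`,
# `use_pretrained_backbone` and `out_indices` verbatim; the hard-coded True above
# corresponds to `use_timm_backbone`, and `out_indices` falls back to (-1,),
# i.e. only the backbone's last feature map, when not supplied.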
| 36 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase__ ( __magic_name__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
snake_case__ : Union[str, Any] = f"https://www.amazon.in/laptop/s?k={product}"
snake_case__ : List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
snake_case__ : int = BeautifulSoup(requests.get(__magic_name__ , headers=__magic_name__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case__ : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            snake_case__ : Optional[int] = item.h2.text
            snake_case__ : Any = """https://www.amazon.in/""" + item.h2.a["""href"""]
snake_case__ : List[str] = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
snake_case__ : Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
snake_case__ : Optional[int] = """Not available"""
try:
snake_case__ : Tuple = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
snake_case__ : Optional[Any] = """"""
try:
snake_case__ : str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_00 )
except ValueError:
snake_case__ : List[Any] = float("""nan""" )
except AttributeError:
pass
snake_case__ : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case__ : List[Any] = """ """
snake_case__ : Union[str, Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
A_ : int = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
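# Sanity check for the discount formula above (editorial): with product_mrp "₹1,000"
# and product_price "₹750", (1000 - 750) / 1000 * 100 == 25.0 percent; when the MRP
# field is missing, float("") raises ValueError and discount becomes float("nan").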
| 38 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
UpperCamelCase : Tuple = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
UpperCamelCase : str = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class A__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_INIT_CONFIGURATION
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = SqueezeBertTokenizer
def __init__( self : int , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Dict="[UNK]" , lowerCamelCase__ : str="[SEP]" , lowerCamelCase__ : Tuple="[PAD]" , lowerCamelCase__ : Tuple="[CLS]" , lowerCamelCase__ : str="[MASK]" , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Tuple , ):
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars
):
a__ : Union[str, Any] = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) )
a__ : List[Any] = do_lower_case
a__ : Optional[Any] = strip_accents
a__ : str = tokenize_chinese_chars
a__ : Any = normalizer_class(**lowerCamelCase__ )
a__ : Any = do_lower_case
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any]=None ):
a__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ):
a__ : List[Any] = [self.sep_token_id]
a__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
a__ : List[Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
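# Example of the special-token layout the methods above produce (editorial,
# standard BERT-style convention):
#   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0 over "[CLS] A [SEP]", 1 over "B [SEP]"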
| 37 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : Optional[int] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = """lower newer"""
snake_case__ : Dict = """lower newer"""
return input_text, output_text
def __UpperCamelCase ( self ):
snake_case__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : Tuple = """lower newer"""
snake_case__ : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : Tuple = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokens + [tokenizer.unk_token]
snake_case__ : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
snake_case__ : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = """Encode this sequence."""
snake_case__ : Union[str, Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case__ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case__ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
snake_case__ : Dict = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
snake_case__ : str = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
snake_case__ : str = encoded.index(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __UpperCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["""trim_offsets"""] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : Any = f"{text_of_1_token} {text_of_1_token}"
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Optional[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
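# Summary of the four offset cases above (editorial): with text "hello hello",
# trim_offsets=True maps the second token to (6, 11), excluding the leading space,
# while trim_offsets=False maps it to (5, 11); prefixing the text with an extra
# space shifts every offset right by one in the same pattern.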
| 38 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case_ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 5_0_2_5_7 , _UpperCamelCase : int = 1_0_2_4 , _UpperCamelCase : int = 7_6_8 , _UpperCamelCase : int = 1_2 , _UpperCamelCase : int = 1_2 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "gelu_new" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 1e-5 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , ) ->Tuple:
super().__init__()
snake_case_ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
snake_case_ = prefix_inner_dim
snake_case_ = prefix_hidden_dim
snake_case_ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case_ = (
nn.Linear(self.prefix_hidden_dim , _UpperCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
        snake_case_ = GPT2Config(
vocab_size=_UpperCamelCase , n_positions=_UpperCamelCase , n_embd=_UpperCamelCase , n_layer=_UpperCamelCase , n_head=_UpperCamelCase , n_inner=_UpperCamelCase , activation_function=_UpperCamelCase , resid_pdrop=_UpperCamelCase , embd_pdrop=_UpperCamelCase , attn_pdrop=_UpperCamelCase , layer_norm_epsilon=_UpperCamelCase , initializer_range=_UpperCamelCase , scale_attn_weights=_UpperCamelCase , use_cache=_UpperCamelCase , scale_attn_by_inverse_layer_idx=_UpperCamelCase , reorder_and_upcast_attn=_UpperCamelCase , )
        snake_case_ = GPT2LMHeadModel(_UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : torch.Tensor , _UpperCamelCase : torch.Tensor , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , ) ->Any:
snake_case_ = self.transformer.transformer.wte(_UpperCamelCase )
snake_case_ = self.encode_prefix(_UpperCamelCase )
snake_case_ = self.decode_prefix(_UpperCamelCase )
snake_case_ = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case_ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case_ = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case_ = self.transformer(inputs_embeds=_UpperCamelCase , labels=_UpperCamelCase , attention_mask=_UpperCamelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__( self : Any , _UpperCamelCase : int , _UpperCamelCase : torch.device ) ->torch.Tensor:
        return torch.zeros(_UpperCamelCase , self.prefix_length , dtype=torch.int64 , device=_UpperCamelCase )
def snake_case__( self : int , _UpperCamelCase : List[str] ) ->List[Any]:
return self.encode_prefix(_UpperCamelCase )
@torch.no_grad()
def snake_case__( self : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : str ) ->Tuple:
snake_case_ = torch.split(_UpperCamelCase , 1 , dim=0 )
snake_case_ = []
snake_case_ = []
for feature in features:
snake_case_ = self.decode_prefix(feature.to(_UpperCamelCase ) ) # back to the clip feature
# Only support beam search for now
snake_case_, snake_case_ = self.generate_beam(
input_embeds=_UpperCamelCase , device=_UpperCamelCase , eos_token_id=_UpperCamelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case_ = torch.stack(_UpperCamelCase )
snake_case_ = torch.stack(_UpperCamelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__( self : Any , _UpperCamelCase : List[Any]=None , _UpperCamelCase : int=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : int = 5 , _UpperCamelCase : int = 6_7 , _UpperCamelCase : float = 1.0 , _UpperCamelCase : Optional[int] = None , ) ->List[str]:
snake_case_ = eos_token_id
snake_case_ = None
snake_case_ = None
snake_case_ = torch.ones(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.int )
snake_case_ = torch.zeros(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.bool )
if input_embeds is not None:
snake_case_ = input_embeds
else:
snake_case_ = self.transformer.transformer.wte(_UpperCamelCase )
for i in range(_UpperCamelCase ):
snake_case_ = self.transformer(inputs_embeds=_UpperCamelCase )
snake_case_ = outputs.logits
snake_case_ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case_ = logits.softmax(-1 ).log()
if scores is None:
snake_case_, snake_case_ = logits.topk(_UpperCamelCase , -1 )
snake_case_ = generated.expand(_UpperCamelCase , *generated.shape[1:] )
snake_case_, snake_case_ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
snake_case_ = next_tokens
else:
snake_case_ = tokens.expand(_UpperCamelCase , *tokens.shape[1:] )
snake_case_ = torch.cat((tokens, next_tokens) , dim=1 )
else:
snake_case_ = -float(np.inf )
snake_case_ = 0
snake_case_ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case_ = scores_sum / seq_lengths[:, None]
snake_case_, snake_case_ = scores_sum_average.view(-1 ).topk(_UpperCamelCase , -1 )
snake_case_ = next_tokens // scores_sum.shape[1]
snake_case_ = seq_lengths[next_tokens_source]
snake_case_ = next_tokens % scores_sum.shape[1]
snake_case_ = next_tokens.unsqueeze(1 )
snake_case_ = tokens[next_tokens_source]
snake_case_ = torch.cat((tokens, next_tokens) , dim=1 )
snake_case_ = generated[next_tokens_source]
snake_case_ = scores_sum_average * seq_lengths
snake_case_ = is_stopped[next_tokens_source]
snake_case_ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
snake_case_ = torch.cat((generated, next_token_embed) , dim=1 )
snake_case_ = is_stopped + next_tokens.eq(_UpperCamelCase ).squeeze()
if is_stopped.all():
break
snake_case_ = scores / seq_lengths
snake_case_ = scores.argsort(descending=_UpperCamelCase )
# tokens tensors are already padded to max_seq_length
snake_case_ = [tokens[i] for i in order]
snake_case_ = torch.stack(_UpperCamelCase , dim=0 )
snake_case_ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths | 39 |
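# Beam-search bookkeeping sketch (editorial, names from the code above): scores_sum
# has shape (num_beams, vocab_size); after scores_sum_average.view(-1).topk(beam_size)
# the flat indices are split as
#   next_tokens // vocab_size -> which beam each candidate continues
#   next_tokens %  vocab_size -> which token id it appends
# and tokens, generated and seq_lengths are re-gathered from the surviving beams.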
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Any = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
'''simple docstring'''
lowerCamelCase__ = '''resnet'''
lowerCamelCase__ = ['''basic''', '''bottleneck''']
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , __SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE="bottleneck" , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
snake_case__ : List[Any] = num_channels
snake_case__ : str = embedding_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : Dict = depths
snake_case__ : List[Any] = layer_type
snake_case__ : int = hidden_act
snake_case__ : Union[str, Any] = downsample_in_first_stage
snake_case__ : Dict = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
snake_case__ , snake_case__ : Any = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
class __snake_case ( OnnxConfig ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-3
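# Editorial note (hedged): the OnnxConfig subclass above declares every axis of
# `pixel_values` (batch, num_channels, height, width) as dynamic for ONNX export,
# and the 1e-3 property is the absolute tolerance used when validating the export.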
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
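# How the lazy pattern above behaves (editorial sketch): at import time this module
# is replaced by a _LazyModule that only knows `_import_structure`, so for example
#   from transformers.models.albert import AlbertModel
# defers the heavy torch import until the attribute is first accessed, while the
# TYPE_CHECKING branch still gives static type checkers the real symbols.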
| 40 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 38 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
    SCREAMING_SNAKE_CASE : jnp.dtype = jnp.float32
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
    SCREAMING_SNAKE_CASE : jnp.dtype = jnp.float32
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
        __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.float32 )
        __lowercase = jnp.ones((1,) ,dtype=jnp.int32 )
        __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.float32 )
        __lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
        __lowercase = jnp.zeros(lowercase__ ,dtype=jnp.float32 )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                __lowercase = FlaxCrossAttnDownBlock2D(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
                __lowercase = FlaxDownBlock2D(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
        __lowercase = FlaxUNetMidBlock2DCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
            __lowercase = jnp.array([timesteps] ,dtype=jnp.int32 )
        elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
            __lowercase = timesteps.astype(dtype=jnp.float32 )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
        # 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
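# Hedged usage sketch for FlaxControlNetModel above (argument order taken from
# __call__; values illustrative, positional because parameter names are obfuscated):
#   params = model.init_weights(jax.random.PRNGKey(0))
#   down_res, mid_res = model.apply(
#       {"params": params}, sample, timesteps, encoder_hidden_states,
#       controlnet_cond, 1.0, False,  # conditioning_scale, return_dict
#   )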
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
'''simple docstring'''
def __init__( self ):
snake_case__ : str = []
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_init_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_train_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_epoch_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_begin""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_step_end""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_evaluate""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_predict""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_save""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_log""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
self.events.append("""on_prediction_step""" )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Tuple = tempfile.mkdtemp()
def __UpperCamelCase ( self ):
shutil.rmtree(self.output_dir )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case__ : List[Any] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionDataset(length=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionModelConfig(a=__SCREAMING_SNAKE_CASE , b=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = RegressionPreTrainedModel(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = TrainingArguments(self.output_dir , disable_tqdm=__SCREAMING_SNAKE_CASE , report_to=[] , **__SCREAMING_SNAKE_CASE )
return Trainer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , train_dataset=__SCREAMING_SNAKE_CASE , eval_dataset=__SCREAMING_SNAKE_CASE , callbacks=__SCREAMING_SNAKE_CASE , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
snake_case__ : Tuple = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
snake_case__ : List[str] = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
        for cba, cbb in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                self.assertEqual(__SCREAMING_SNAKE_CASE , cbb.__class__ )
elif not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , __SCREAMING_SNAKE_CASE )
else:
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Union[str, Any] = 0
snake_case__ : Dict = len(trainer.get_eval_dataloader() )
snake_case__ : Any = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(__SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase ( self ):
snake_case__ : Any = self.get_trainer()
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
snake_case__ : Optional[Any] = self.get_trainer(disable_tqdm=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : int = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.get_trainer()
snake_case__ : List[str] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.remove(__SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[Any] = trainer.pop_callback(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
trainer.add_callback(__SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , __SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
import warnings
        # XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
snake_case__ : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# A bit of everything
snake_case__ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(__SCREAMING_SNAKE_CASE , self.get_expected_events(__SCREAMING_SNAKE_CASE ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : List[str] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(__SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
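
# --- illustrative sketch (standard library only, not part of the test above) ---
# get_expected_events replays the Trainer loop to predict the callback-event
# sequence. This hypothetical helper mirrors that logic for the simple case with
# no evaluation, so the firing order is easy to read in isolation.
def _expected_trainer_events(num_epochs, steps_per_epoch, logging_steps, save_steps):
    events = ["on_init_end", "on_train_begin"]
    step = 0
    for _ in range(num_epochs):
        events.append("on_epoch_begin")
        for _ in range(steps_per_epoch):
            step += 1
            events += ["on_step_begin", "on_step_end"]
            if step % logging_steps == 0:
                events.append("on_log")
            if step % save_steps == 0:
                events.append("on_save")
        events.append("on_epoch_end")
    events += ["on_log", "on_train_end"]
    return events

# one epoch of two steps, logging every step, saving every second step:
assert _expected_trainer_events(1, 2, 1, 2) == [
    "on_init_end", "on_train_begin", "on_epoch_begin",
    "on_step_begin", "on_step_end", "on_log",
    "on_step_begin", "on_step_end", "on_log", "on_save",
    "on_epoch_end", "on_log", "on_train_end",
]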
| 38 | 0 |
'''simple docstring'''
def _UpperCamelCase ( num ) -> bool:
    """Return True if a non-negative integer reads the same forwards and backwards.

    >>> _UpperCamelCase(121)
    True
    >>> _UpperCamelCase(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # peel the last digit off num and append it to the reversed number
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
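
# --- standalone trace of the digit-reversal loop above (illustrative only) ---
if __name__ == "__main__":
    n, rev = 121, 0
    while n > 0:
        rev = rev * 10 + (n % 10)  # rev: 1 -> 12 -> 121
        n //= 10
    print(rev == 121)  # True: 121 is a palindrome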
| 42 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_8 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=4_0_0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ):
snake_case__ : Any = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : str = image_size
snake_case__ : Union[str, Any] = min_resolution
snake_case__ : List[Any] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : int = size
snake_case__ : Tuple = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : Union[str, Any] = image_std
def __UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = DPTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ):
snake_case__ : str = DPTImageProcessingTester(self )
@property
def __UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) )
def __UpperCamelCase ( self ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8} )
snake_case__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2} )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : Any = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __UpperCamelCase ( self ):
# Initialize image_processing
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
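
# --- illustrative usage sketch (assumes transformers, Pillow and torch are
# installed; run standalone, it is not part of the test class above) ---
if __name__ == "__main__":
    from PIL import Image as _Image
    from transformers import DPTImageProcessor as _DPTImageProcessor

    _processor = _DPTImageProcessor(size={"height": 18, "width": 18})
    # one image -> a batch of one: (1, C, H, W)
    _single = _processor(images=_Image.new("RGB", (30, 40)), return_tensors="pt").pixel_values
    assert _single.shape == (1, 3, 18, 18)
    # a list of images -> a stacked batch: (N, C, H, W)
    _batch = _processor(images=[_Image.new("RGB", (30, 40))] * 4, return_tensors="pt").pixel_values
    assert _batch.shape == (4, 3, 18, 18)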
| 38 | 0 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[Any] = DownBlockaD # noqa F405
_lowercase : Dict = '''down'''
def lowerCamelCase_ ( self: List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405
_lowercase : Tuple = '''down'''
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = AttnDownBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = CrossAttnDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Tuple:
"""simple docstring"""
lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405
_lowercase : str = '''down'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = SkipDownBlockaD # noqa F405
_lowercase : Tuple = '''down'''
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowercase : Optional[int] = '''down'''
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = DownEncoderBlockaD # noqa F405
_lowercase : List[Any] = '''down'''
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: str ) -> Dict:
"""simple docstring"""
lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405
_lowercase : int = '''down'''
@property
def lowerCamelCase_ ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405
_lowercase : Union[str, Any] = '''mid'''
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405
_lowercase : str = '''mid'''
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowercase : str = '''mid'''
@property
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = UpBlockaD # noqa F405
_lowercase : Any = '''up'''
@property
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405
_lowercase : List[Any] = '''up'''
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Any = CrossAttnUpBlockaD # noqa F405
_lowercase : List[str] = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
_lowercase : Dict = '''up'''
@property
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ )
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common()
lowercase__ = 32
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnUpBlockaD # noqa F405
_lowercase : Optional[Any] = '''up'''
@property
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCamelCase_ ( self: List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = SkipUpBlockaD # noqa F405
_lowercase : Optional[int] = '''up'''
@property
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = AttnSkipUpBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Dict = UpDecoderBlockaD # noqa F405
_lowercase : Tuple = '''up'''
@property
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCamelCase_ )
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
_lowercase : str = '''up'''
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return super().get_dummy_input(include_temb=UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> List[str]:
"""simple docstring"""
lowercase__ = {'''in_channels''': 32, '''out_channels''': 32}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCamelCase_ )
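
# --- illustrative sketch of the expected-slice check used throughout (numpy only) ---
# Each block test above pins nine reference values; the shared mixin (in
# test_unet_blocks_common, not shown here) extracts a small slice of the block
# output and compares it against them. The helper below is a hypothetical
# stand-in for that comparison; the tolerance is an assumption.
import numpy as _np

def _matches_expected_slice(output_slice, expected_slice, atol=1e-3):
    flat = _np.asarray(output_slice).flatten()
    return bool(_np.allclose(flat, _np.asarray(expected_slice), atol=atol))

assert _matches_expected_slice([0.1000, -0.2000], [0.1002, -0.1999])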
| 43 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """embed_dim""" ) )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """num_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=[1_6, 4_8, 9_6] , __SCREAMING_SNAKE_CASE=[1, 3, 6] , __SCREAMING_SNAKE_CASE=[1, 2, 1_0] , __SCREAMING_SNAKE_CASE=[7, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2] , __SCREAMING_SNAKE_CASE=[2, 1, 1] , __SCREAMING_SNAKE_CASE=[2, 2, 2] , __SCREAMING_SNAKE_CASE=[False, False, True] , __SCREAMING_SNAKE_CASE=[0.0, 0.0, 0.0] , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , ):
snake_case__ : List[str] = parent
snake_case__ : Tuple = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : List[Any] = patch_sizes
snake_case__ : Optional[int] = patch_stride
snake_case__ : Optional[Any] = patch_padding
snake_case__ : Any = is_training
snake_case__ : int = use_labels
snake_case__ : Dict = num_labels
snake_case__ : Optional[Any] = num_channels
snake_case__ : Optional[Any] = embed_dim
snake_case__ : Optional[int] = num_heads
snake_case__ : Optional[int] = stride_kv
snake_case__ : int = depth
snake_case__ : Optional[Any] = cls_token
snake_case__ : List[Any] = attention_drop_rate
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : List[Any] = layer_norm_eps
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[Any] = None
if self.use_labels:
            # create random integer labels in [0, num_labels)
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = TFCvtModel(config=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = (self.image_size, self.image_size)
snake_case__ , snake_case__ : str = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
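            # conv output size per stage: floor((size + 2*pad - kernel) / stride + 1);
            # e.g. stage 0 with the defaults above: floor((64 + 2*2 - 7) / 4 + 1) = 16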
snake_case__ : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case__ : Optional[int] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = self.num_labels
snake_case__ : str = TFCvtForImageClassification(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtModelTester(self )
snake_case__ : Any = TFCvtConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __UpperCamelCase ( self ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def __UpperCamelCase ( self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __UpperCamelCase ( self ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__SCREAMING_SNAKE_CASE )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Tuple = len(self.model_tester.depth )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[str] = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFCvtModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : int = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
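
# --- illustrative sketch: the patch-embedding output-size arithmetic used above ---
# Standard library only; the kernel/stride/padding values mirror the tester
# defaults and are otherwise illustrative.
from math import floor as _floor

def _stage_output_size(size, kernel, stride, padding):
    # classic convolution output-size formula, as applied per CvT stage above
    return _floor((size + 2 * padding - kernel) / stride + 1)

# 64 -> 16 -> 8 -> 4 for kernels [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]
_sizes = [64]
for _k, _s, _p in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    _sizes.append(_stage_output_size(_sizes[-1], _k, _s, _p))
assert _sizes == [64, 16, 8, 4]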
| 38 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
def __init__( self : List[str],__A : int,__A : Optional[int]=1_3,__A : Any=7,__A : Union[str, Any]=True,__A : Optional[int]=True,__A : List[str]=True,__A : Tuple=True,__A : Any=9_9,__A : Any=3_2,__A : Any=5,__A : Tuple=4,__A : List[str]=3_7,__A : Union[str, Any]="gelu",__A : Tuple=0.1,__A : Tuple=0.1,__A : Optional[int]=5_1_2,__A : Union[str, Any]=1_6,__A : Dict=2,__A : Any=0.02,__A : Optional[Any]=3,__A : int=4,__A : Optional[Any]=None,):
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Any = use_token_type_ids
_lowerCamelCase : int = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : List[str] = num_choices
_lowerCamelCase : Dict = scope
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Any = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : List[Any] ):
return NystromformerConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
def lowerCamelCase_ ( self : str,__A : str,__A : Dict,__A : List[str],__A : List[Any],__A : Dict,__A : Optional[Any],__A : Any ):
_lowerCamelCase : int = NystromformerModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A,attention_mask=__A,token_type_ids=__A )
_lowerCamelCase : Optional[Any] = model(__A,token_type_ids=__A )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Any,__A : Union[str, Any],__A : Dict,__A : Union[str, Any],__A : Optional[int],__A : Tuple,__A : Union[str, Any],__A : List[Any] ):
_lowerCamelCase : str = NystromformerForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A,token_type_ids=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Union[str, Any],__A : List[Any],__A : Any,__A : Tuple,__A : List[str],__A : Union[str, Any],__A : Tuple,__A : Optional[int] ):
_lowerCamelCase : str = NystromformerForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Any = model(
__A,attention_mask=__A,token_type_ids=__A,start_positions=__A,end_positions=__A,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Dict,__A : Dict,__A : str,__A : List[Any],__A : List[str],__A : Union[str, Any],__A : Tuple,__A : Any ):
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : int = NystromformerForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,token_type_ids=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[Any],__A : str,__A : Dict,__A : Tuple,__A : Optional[int],__A : Any,__A : Optional[Any],__A : List[Any] ):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : str = NystromformerForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[int] = model(__A,attention_mask=__A,token_type_ids=__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : str,__A : List[Any],__A : Union[str, Any],__A : Optional[int],__A : Tuple,__A : List[str] ):
_lowerCamelCase : Optional[int] = self.num_choices
_lowerCamelCase : Dict = NystromformerForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
_lowerCamelCase : List[Any] = model(
__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
            _lowerCamelCase,
        ) = config_and_inputs
_lowerCamelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : int = NystromformerModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : List[str] = type
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def lowerCamelCase_ ( self : str ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = NystromformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(__A )[0]
_lowerCamelCase : int = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape,__A )
_lowerCamelCase : str = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3],__A,atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[int] = "the [MASK] of Belgium is Brussels"
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Dict = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[Any] = tokenizer(__A,return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(encoding.input_ids ).logits
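        # "the [MASK] of Belgium is Brussels" tokenizes with [MASK] at position 2
        # (after [CLS] and "the"), so the slice below reads that position's logits
        # and argmax over the vocabulary picks the top fill-in token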
_lowerCamelCase : Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__A ),"capital" ) | 44 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # For consistency across the different places where DisjunctiveConstraint is
        # called, dc.token_ids is a list of integers. It is also initialized only from integers.
snake_case__ : int = [[1, 2, 4], [1, 2, 3, 4]]
snake_case__ : Any = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
snake_case__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def __UpperCamelCase ( self ):
snake_case__ : List[str] = [[1, 2, 3], [1, 2, 4]]
snake_case__ : Optional[int] = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : Any = dc.update(1 )
snake_case__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : Tuple = dc.update(2 )
snake_case__ : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(3 )
snake_case__ : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
snake_case__ : int = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : str = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
snake_case__ , snake_case__ , snake_case__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
snake_case__ , snake_case__ , snake_case__ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
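
# --- illustrative sketch: a tiny stand-in for the branch tracking behind
# DisjunctiveConstraint (standard library only; the real implementation lives in
# transformers.generation and differs in detail) ---
class _TinyDisjunctiveTracker:
    def __init__(self, nested_token_ids):
        # reject branches that are complete prefixes of other branches, for the
        # reason spelled out in the test comment above
        for seq in nested_token_ids:
            for other in nested_token_ids:
                if seq is not other and other[: len(seq)] == seq:
                    raise ValueError("one constraint is a complete subset of another")
        self.token_ids = nested_token_ids
        self.current_seq = []

    def update(self, token_id):
        candidate = self.current_seq + [token_id]
        if any(seq[: len(candidate)] == candidate for seq in self.token_ids):
            self.current_seq = candidate
        else:
            self.current_seq = []  # reset on a token that leaves every branch
        return self.current_seq in self.token_ids  # completed?

_t = _TinyDisjunctiveTracker([[1, 2, 3], [1, 2, 4]])
assert not _t.update(1) and not _t.update(2) and _t.update(3)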
| 38 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
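        # with the defaults above (image_size=4, patch_size=2, text_seq_length=7):
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so seq_length = 7 + 5 = 12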
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
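        # boxes are (x0, y0, x1, y1); swap any pair where the end coordinate is
        # smaller than the start coordinate so that x0 <= x1 and y0 <= y1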
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
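    # Exercises the base model under three input regimes: text + image, text only, and image only, asserting the hidden-state shapes.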
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
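    # Tiles the inputs by num_choices for multiple-choice models and, when return_labels is set, attaches dummy labels shaped for each task head.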
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
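    # Verifies that models exposing hf_compute_loss return a correctly shaped loss for keyword, masked-label, dict and tuple inputs.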
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
        ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
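# Opens the COCO sample image shipped with the test fixtures.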
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
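# ONNX export config for SegFormer: dynamic axes for pixel_values, a 1e-4 validation tolerance and a default opset of 12.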
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
if len(_lowerCamelCase ) <= 1 or n <= 1:
return
insert_next(_lowerCamelCase , n - 1 )
rec_insertion_sort(_lowerCamelCase , n - 1 )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
if index >= len(_lowerCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_lowerCamelCase, _lowerCamelCase : Dict = (
collection[index],
collection[index - 1],
)
insert_next(_lowerCamelCase , index + 1 )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = input('''Enter integers separated by spaces: ''')
_lowerCAmelCase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 46 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = None
if token is not None:
snake_case__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Tuple = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
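# Collects artifact name -> archive download URL pairs for a workflow run, following the 100-per-page pagination.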
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : Dict = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case__ : Union[str, Any] = requests.get(__magic_name__ , headers=__magic_name__ ).json()
snake_case__ : Dict = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
snake_case__ : List[Any] = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(__magic_name__ ):
snake_case__ : Dict = requests.get(url + f"&page={i + 2}" , headers=__magic_name__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
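# Downloads one artifact zip; GitHub answers with a redirect whose Location header points at the actual archive.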
def UpperCamelCase__ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = None
if token is not None:
snake_case__ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
snake_case__ : str = requests.get(__magic_name__ , headers=__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : Any = result.headers["""Location"""]
snake_case__ : Tuple = requests.get(__magic_name__ , allow_redirects=__magic_name__ )
snake_case__ : int = os.path.join(__magic_name__ , f"{artifact_name}.zip" )
with open(__magic_name__ , """wb""" ) as fp:
fp.write(response.content )
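# Reads failures_line.txt, summary_short.txt and job_name.txt from one artifact zip and pairs each error with its failed test and job link.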
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : str=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = []
snake_case__ : Union[str, Any] = []
snake_case__ : Any = None
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__magic_name__ ) as f:
for line in f:
snake_case__ : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case__ : str = line[: line.index(""": """ )]
snake_case__ : Optional[int] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
snake_case__ : Dict = line[len("""FAILED """ ) :]
failed_tests.append(__magic_name__ )
elif filename == "job_name.txt":
snake_case__ : Optional[Any] = line
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(__magic_name__ )} for `errors` "
f"and {len(__magic_name__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
snake_case__ : Optional[Any] = None
if job_name and job_links:
snake_case__ : Optional[Any] = job_links.get(__magic_name__ , __magic_name__ )
# A list with elements of the form (line of error, error, failed test)
snake_case__ : List[Any] = [x + [y] + [job_link] for x, y in zip(__magic_name__ , __magic_name__ )]
return result
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = []
snake_case__ : Dict = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__magic_name__ , job_links=__magic_name__ ) )
return errors
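# Aggregates log entries by error message, keeping counts plus (failed test, error line) pairs, sorted by frequency.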
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=None ) -> List[Any]:
'''simple docstring'''
snake_case__ : Any = Counter()
counter.update([x[1] for x in logs] )
snake_case__ : Dict = counter.most_common()
snake_case__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case__ : int = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
snake_case__ : Tuple = test.split("""/""" )[2]
else:
snake_case__ : Any = None
return test
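# Buckets errors by the model extracted from each test path (tests/models/<name>) and counts them per model.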
def UpperCamelCase__ ( __magic_name__ : str , __magic_name__ : Union[str, Any]=None ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case__ : List[Any] = [x for x in logs if x[2] is not None]
snake_case__ : Any = {x[2] for x in logs}
snake_case__ : Optional[Any] = {}
for test in tests:
snake_case__ : str = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case__ : Optional[int] = counter.most_common()
snake_case__ : Optional[int] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case__ : int = sum(error_counts.values() )
if n_errors > 0:
snake_case__ : str = {"""count""": n_errors, """errors""": error_counts}
snake_case__ : Union[str, Any] = dict(sorted(r.items() , key=lambda __magic_name__ : item[1]["count"] , reverse=__magic_name__ ) )
return r
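# Formats the per-error counts as a markdown table with an empty status column.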
def UpperCamelCase__ ( __magic_name__ : int ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = """| no. | error | status |"""
snake_case__ : int = """|-:|:-|:-|"""
snake_case__ : int = [header, sep]
for error in reduced_by_error:
snake_case__ : Union[str, Any] = reduced_by_error[error]["""count"""]
snake_case__ : Dict = f"| {count} | {error[:1_00]} | |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = """| model | no. of errors | major error | count |"""
snake_case__ : Optional[int] = """|-:|-:|-:|-:|"""
snake_case__ : Dict = [header, sep]
for model in reduced_by_model:
snake_case__ : Tuple = reduced_by_model[model]["""count"""]
snake_case__ , snake_case__ : Tuple = list(reduced_by_model[model]["""errors"""].items() )[0]
snake_case__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__magic_name__ )
return "\n".join(__magic_name__ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A_ : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
A_ : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ : int = k.find(" / ")
A_ : List[Any] = k[index + len(" / ") :]
A_ : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ : Any = reduce_by_error(errors)
A_ : Union[str, Any] = reduce_by_model(errors)
A_ : Any = make_github_table(reduced_by_error)
A_ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 38 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 47 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A_ : Tuple = get_logger(__name__)
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = '''dummy_data'''
lowerCamelCase__ = '''datasets'''
lowerCamelCase__ = False
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[Any] = 0
snake_case__ : Union[str, Any] = dataset_name
snake_case__ : Optional[int] = cache_dir
snake_case__ : Union[str, Any] = use_local_dummy_data
snake_case__ : int = config
# download_callbacks take a single url as input
snake_case__ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
snake_case__ : Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
snake_case__ : Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
# to be downloaded
snake_case__ : List[str] = None
snake_case__ : List[str] = None
@property
def __UpperCamelCase ( self ):
if self._dummy_file is None:
snake_case__ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def __UpperCamelCase ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
snake_case__ : Optional[int] = cached_path(
__SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=__SCREAMING_SNAKE_CASE , force_extract=__SCREAMING_SNAKE_CASE )
return os.path.join(__SCREAMING_SNAKE_CASE , self.dummy_file_name )
@property
def __UpperCamelCase ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __UpperCamelCase ( self ):
if self._bucket_url is None:
snake_case__ : List[str] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def __UpperCamelCase ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
snake_case__ : List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
snake_case__ : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.create_dummy_data_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ):
return self.create_dummy_data_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return self.create_dummy_data_single(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return self.download_and_extract(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return path
def __UpperCamelCase ( self ):
return {}
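    # Maps every URL (or list of URLs) to a quoted file name inside the dummy folder, appending the key when values collide.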
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for single_url in single_urls:
download_callback(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = single_urls
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Tuple = [os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls]
else:
snake_case__ : List[Any] = single_urls
snake_case__ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(__SCREAMING_SNAKE_CASE ).name ) )
snake_case__ : Optional[int] = value
# make sure that values are unique
if all(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
snake_case__ : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
snake_case__ : Tuple = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __SCREAMING_SNAKE_CASE ) ) for url in data_url )
snake_case__ : List[Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
snake_case__ : List[str] = [data_url[0]] * len(__SCREAMING_SNAKE_CASE )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__SCREAMING_SNAKE_CASE )
return dummy_data_list
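    # Single-URL case: return the local dummy path when it exists, otherwise fall back to the zip root for backward compatibility.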
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for download_callback in self.download_callbacks:
download_callback(__SCREAMING_SNAKE_CASE )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
snake_case__ : Any = os.path.join(__SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
pass
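    # Yields (relative posix path, open file handle) pairs, from the dummy zip when local dummy data is used, else from disk.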
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
def _iter_archive_members(__SCREAMING_SNAKE_CASE ):
# this preserves the order of the members inside the ZIP archive
snake_case__ : List[str] = Path(self.dummy_file ).parent
snake_case__ : Dict = path.relative_to(__SCREAMING_SNAKE_CASE )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
snake_case__ : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = Path(__SCREAMING_SNAKE_CASE )
snake_case__ : int = _iter_archive_members(__SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__SCREAMING_SNAKE_CASE ).as_posix(), file_path.open("""rb""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = [paths]
for path in paths:
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__SCREAMING_SNAKE_CASE ):
if os.path.basename(__SCREAMING_SNAKE_CASE ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(__SCREAMING_SNAKE_CASE ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
snake_case__ :List[Any] = ViTImageProcessor if is_vision_available() else None
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = (3, 32, 128)
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__magic_name__ ) + "\n" )
lowerCAmelCase__ = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__magic_name__ : Optional[int] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__magic_name__ : Optional[int] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
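    # Builds a single random RGB PIL image used as the pixel input throughout these processor tests.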
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
lowerCAmelCase__ = Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) )
return image_input
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
lowerCAmelCase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__magic_name__ , return_tensors="np" )
lowerCAmelCase__ = processor(images=__magic_name__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = "test"
lowerCAmelCase__ = processor(text=__magic_name__ )
lowerCAmelCase__ = tokenizer(__magic_name__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = "test"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
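    # char_decode output should match the tokenizer's batch_decode once spaces are stripped.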
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.char_decode(__magic_name__ )
lowerCAmelCase__ = tokenizer.batch_decode(__magic_name__ )
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = MgpstrProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
lowerCAmelCase__ = torch.randn(1 , 27 , 38 )
lowerCAmelCase__ = torch.randn(1 , 27 , 50257 )
lowerCAmelCase__ = torch.randn(1 , 27 , 30522 )
lowerCAmelCase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 48 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFImgaImgSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCamelCase ( self ):
return self._get_superresolution_dummy_components()
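    # Seeds a generator and builds random 32x32 image / 16x16 original_image tensors plus prompt kwargs.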
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __UpperCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __UpperCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ):
self._test_save_load_local()
def __UpperCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 38 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
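    # Shared test inputs: a small random image rescaled via image / 2 + 0.5, source and target prompts, and sampling kwargs.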
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
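# Integration tests that run the full pipeline against the real CompVis/stable-diffusion-v1-4 checkpoint.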
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
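
# Note (added): the fp16 integration test above allows a loose 5e-1 tolerance
# because half precision plus attention slicing perturbs the samples, while the
# full-precision run is held to the tighter 2e-2 bound.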
| 49 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    '''simple docstring'''
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    '''simple docstring'''
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    '''simple docstring'''
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    '''simple docstring'''
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    '''simple docstring'''
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    '''simple docstring'''
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    '''simple docstring'''
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    '''simple docstring'''
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    '''simple docstring'''

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
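
# Hypothetical usage sketch (added; illustrates how the helpers above are
# combined when prompting for a config value; the prompt text is illustrative):
#
#     mixed_precision = _ask_options(
#         "Do you wish to use FP16 or BF16 (mixed precision)?",
#         ["no", "fp16", "bf16", "fp8"],
#         _convert_mixed_precision,
#     )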
| 38 | 0 |
'''simple docstring'''
from __future__ import annotations

graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}


class Graph:
    '''simple docstring'''

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
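    # Expected behavior (note added for clarity): the first two prints yield
    # 'G->C->A->B->D' and 'G'; the final lookup then raises ValueError because
    # BFS from 'G' never reached the unknown vertex 'Foo'.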
| 50 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
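    # Illustrative checks (added; not part of the original file):
    assert average([3, 5, 7]) == 5.0
    assert average([1, 2, 3, 4]) == 2.5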
| 38 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
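
# Walkthrough (added): with the toy vocab/merges above, "react" only reaches
# the "r e" merge, so it tokenizes to "re@@ a@@ c@@ t", while "readapt" reuses
# the "ad apt</w>" merge and therefore ends in the known piece "adapt". The
# trailing "<unk>" token maps to id 6.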
| 51 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE ):
super().__init__()
snake_case__ : list[str] = []
snake_case__ : List[Any] = domain
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ : str = parse.urljoin(self.domain , __SCREAMING_SNAKE_CASE )
self.urls.append(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__magic_name__ ).split(""".""" )[-2:] )
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
return parse.urlparse(__magic_name__ ).netloc
def UpperCamelCase__ ( __magic_name__ : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ : List[str] = get_domain_name(__magic_name__ )
# Initialize the parser
snake_case__ : Optional[Any] = Parser(__magic_name__ )
try:
# Open URL
snake_case__ : Any = requests.get(__magic_name__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ : List[str] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ : Tuple = requests.get(__magic_name__ )
# Get the valid email.
snake_case__ : List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__magic_name__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__magic_name__ )
if __name__ == "__main__":
A_ : str = emails_from_url("https://github.com")
print(F'{len(emails)} emails found:')
print("\n".join(sorted(emails)))
| 38 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny() -> None:
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray, 3).any()


def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 52 |
'''simple docstring'''
def is_palindrome(head):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    '''simple docstring'''
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
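
# Minimal demo (added; not part of the original file). The checks above only
# assume nodes expose `.val` and `.next`, so an ad-hoc node class suffices.
if __name__ == "__main__":
    class _Node:
        def __init__(self, val, nxt=None):
            self.val = val
            self.next = nxt

    head = _Node(1, _Node(2, _Node(2, _Node(1))))  # 1 -> 2 -> 2 -> 1
    assert is_palindrome_stack(head)
    assert is_palindrome_dict(head)
    # run the splitting/reversing variant last: it mutates the list in place
    assert is_palindrome(head)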
| 38 | 0 |
def apply_table(inp, table):
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
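    # Sanity note (added): SDES decryption applies the same Feistel round
    # function with the round keys in reverse order, so the round trip must
    # restore the original input.
    assert PT == message, "decryption should invert encryption"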
| 53 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A_ : str = 250004
A_ : str = 250020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = MBartTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = MBartTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case__ : Optional[int] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : int = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : Tuple = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
snake_case__ : Any = tempfile.mkdtemp()
snake_case__ : Optional[int] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
snake_case__ : List[Any] = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : Union[str, Any] = tokenizer_r.save_pretrained(__SCREAMING_SNAKE_CASE , legacy_format=__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = tokenizer_p.save_pretrained(__SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case__ : Dict = tokenizer_r.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer_p.from_pretrained(__SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
shutil.rmtree(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = '''facebook/mbart-large-en-ro'''
lowerCamelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCamelCase__ = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def __UpperCamelCase ( cls ):
snake_case__ : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
snake_case__ : Any = 1
return cls
def __UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
snake_case__ : List[Any] = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , __SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = 1_0
snake_case__ : int = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
snake_case__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = MBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
| 38 | 0 |
class TrieNode:
    def __init__(self) -> None:
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        '''simple docstring'''

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    '''simple docstring'''
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    '''simple docstring'''
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    '''simple docstring'''
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    '''simple docstring'''
    assert test_trie()


def main() -> None:
    '''simple docstring'''
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
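    # Quick illustration (added): find() only reports fully inserted words,
    # since it checks the is_leaf flag at the final node rather than mere
    # prefix existence.
    t = TrieNode()
    t.insert_many(["car", "card"])
    assert t.find("card") and not t.find("ca")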
| 54 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
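
# Usage sketch (added; assumes the surrounding transformers package context,
# so this is illustrative rather than directly runnable here):
#   config = BitConfig(depths=[3, 4, 6, 3])
#   config.stage_names -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']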
| 38 | 0 |
from manim import *


class Stage1(Scene):
    '''simple docstring'''

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1), )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
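
# Rendering note (added): a manim scene is rendered from the CLI rather than by
# running this module directly, e.g. `manim -pql stage_1.py Stage1`; the file
# and scene names here are assumptions of the reconstruction above.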
| 55 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : str=False ) -> Tuple:
'''simple docstring'''
snake_case__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : int = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : int = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : Optional[Any] = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Optional[int] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ : str = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = dct.pop(__magic_name__ )
snake_case__ : Dict = val
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[int] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : int=False ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=__magic_name__ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=__magic_name__ , image_size=3_84 , num_labels=10_00 )
snake_case__ : Union[str, Any] = False
# load original model from timm
snake_case__ : List[Any] = timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__magic_name__ )
snake_case__ : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case__ : str = """huggingface/label-files"""
snake_case__ : Union[str, Any] = """imagenet-1k-id2label.json"""
snake_case__ : Dict = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : List[Any] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case__ : int = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(__magic_name__ ).eval()
else:
snake_case__ : Union[str, Any] = ViTHybridForImageClassification(__magic_name__ ).eval()
model.load_state_dict(__magic_name__ )
# create image processor
snake_case__ : Optional[Any] = create_transform(**resolve_data_config({} , model=__magic_name__ ) )
snake_case__ : Union[str, Any] = transform.transforms
snake_case__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : Any = ViTHybridImageProcessor(
do_resize=__magic_name__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__magic_name__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__magic_name__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : int = transform(__magic_name__ ).unsqueeze(0 )
snake_case__ : List[str] = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__magic_name__ , __magic_name__ )
# verify logits
with torch.no_grad():
snake_case__ : Optional[Any] = model(__magic_name__ )
snake_case__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Dict = timm_model.forward_features(__magic_name__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__magic_name__ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : int = timm_model(__magic_name__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__magic_name__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
A_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 38 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "sentencepiece.bpe.model"}
_a : List[Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_a : Union[str, Any] = {
"moussaKam/mbarthez": 1_024,
"moussaKam/barthez": 1_024,
"moussaKam/barthez-orangesum-title": 1_024,
}
_a : Dict = "▁"
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[str] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : Any="<pad>" , SCREAMING_SNAKE_CASE_ : str="<mask>" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
__snake_case = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__snake_case = len(self.sp_model ) - 1
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a ( self : List[str] ) -> Dict:
return len(self.sp_model )
def a ( self : Optional[int] ) -> List[str]:
__snake_case = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
return spm_id if spm_id else self.unk_token_id
def a ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
def a ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
__snake_case = []
__snake_case = ''
__snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
__snake_case = True
__snake_case = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
__snake_case = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
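# A minimal, self-contained sketch (not tied to the tokenizer class above) of the
# special-tokens-mask convention it implements: 1 marks an added special token
# (<s>/</s>), 0 marks a regular token, and pairs are joined as <s> A </s></s> B </s>.
def special_tokens_mask(token_ids_0, token_ids_1=None):
    if token_ids_1 is None:
        return [1] + [0] * len(token_ids_0) + [1]
    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]

assert special_tokens_mask([10, 11, 12]) == [1, 0, 0, 0, 1]
assert special_tokens_mask([10], [20, 21]) == [1, 0, 1, 1, 0, 0, 1]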
| 56 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = 42
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE=True , ):
super().__init__()
snake_case__ : str = layers_per_block
snake_case__ : int = torch.nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : List[Any] = None
snake_case__ : List[Any] = nn.ModuleList([] )
# down
snake_case__ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = output_channel
snake_case__ : Union[str, Any] = block_out_channels[i]
snake_case__ : int = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : str = get_down_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
snake_case__ : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# out
snake_case__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : str = 2 * out_channels if double_z else out_channels
snake_case__ : int = nn.Conv2d(block_out_channels[-1] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : Union[str, Any] = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[Any] = x
snake_case__ : int = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
snake_case__ : List[str] = down_block(__SCREAMING_SNAKE_CASE )
# middle
snake_case__ : str = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
snake_case__ : Any = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
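# A short sketch of the gradient-checkpointing pattern used in the forward pass above:
# activations inside the wrapped module are recomputed during backward instead of
# being stored, trading compute for memory. The module and shapes here are
# illustrative assumptions, not the encoder's real blocks.
import torch
import torch.nn as nn
import torch.utils.checkpoint

block = nn.Sequential(nn.Linear(32, 32), nn.SiLU(), nn.Linear(32, 32))
x = torch.randn(4, 32, requires_grad=True)

def create_custom_forward(module):
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

# use_reentrant is only accepted on torch >= 1.11, matching the version gate above
out = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False)
out.sum().backward()  # the Linear/SiLU activations are recomputed here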
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE=(6_4,) , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE="silu" , __SCREAMING_SNAKE_CASE="group" , ):
super().__init__()
snake_case__ : Any = layers_per_block
snake_case__ : Optional[Any] = nn.Conv2d(
__SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ : Union[str, Any] = None
snake_case__ : Dict = nn.ModuleList([] )
snake_case__ : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
snake_case__ : Tuple = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , )
# up
snake_case__ : List[Any] = list(reversed(__SCREAMING_SNAKE_CASE ) )
snake_case__ : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = output_channel
snake_case__ : Optional[Any] = reversed_block_out_channels[i]
snake_case__ : List[str] = i == len(__SCREAMING_SNAKE_CASE ) - 1
snake_case__ : int = get_up_block(
__SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , prev_output_channel=__SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__SCREAMING_SNAKE_CASE , resnet_groups=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , temb_channels=__SCREAMING_SNAKE_CASE , resnet_time_scale_shift=__SCREAMING_SNAKE_CASE , )
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
snake_case__ : int = output_channel
# out
if norm_type == "spatial":
snake_case__ : List[Any] = SpatialNorm(block_out_channels[0] , __SCREAMING_SNAKE_CASE )
else:
snake_case__ : Any = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__SCREAMING_SNAKE_CASE , eps=1e-6 )
snake_case__ : Tuple = nn.SiLU()
snake_case__ : Union[str, Any] = nn.Conv2d(block_out_channels[0] , __SCREAMING_SNAKE_CASE , 3 , padding=1 )
snake_case__ : int = False
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ):
snake_case__ : Union[str, Any] = z
snake_case__ : Any = self.conv_in(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
snake_case__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
snake_case__ : int = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
# middle
snake_case__ : List[Any] = self.mid_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
snake_case__ : Dict = up_block(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
snake_case__ : Optional[Any] = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : str = self.conv_norm_out(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.conv_act(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
class __snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="random" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True ):
super().__init__()
snake_case__ : int = n_e
snake_case__ : Optional[int] = vq_embed_dim
snake_case__ : int = beta
snake_case__ : Optional[int] = legacy
snake_case__ : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ : List[str] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
snake_case__ : Optional[Any] = self.used.shape[0]
snake_case__ : List[str] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ : Dict = self.re_embed
snake_case__ : List[str] = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
snake_case__ : Union[str, Any] = n_e
snake_case__ : str = sane_index_shape
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : Any = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : Dict = inds.reshape(ishape[0] , -1 )
snake_case__ : Any = self.used.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ : List[Any] = match.argmax(-1 )
snake_case__ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ : Optional[Any] = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[Any] = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
snake_case__ : int = inds.reshape(ishape[0] , -1 )
snake_case__ : Optional[int] = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ : List[Any] = 0 # simply set to zero
snake_case__ : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ : Dict = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
snake_case__ : Union[str, Any] = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
# compute loss for embedding
if not self.legacy:
snake_case__ : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
snake_case__ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ : List[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ : str = self.remap_to_used(__SCREAMING_SNAKE_CASE )
snake_case__ : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ : Optional[int] = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ : int = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
snake_case__ : str = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
snake_case__ : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
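# A compact sketch of the core vector-quantization step implemented above: pick the
# nearest codebook entry with torch.cdist/argmin, compute a commitment loss, and pass
# gradients straight through with z + (z_q - z).detach(). All sizes are illustrative.
import torch
import torch.nn as nn

codebook = nn.Embedding(16, 8)                     # 16 codes, 8-dim embeddings
z = torch.randn(4, 8, requires_grad=True)          # 4 latent vectors

indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
z_q = codebook(indices)

loss = torch.mean((z_q.detach() - z) ** 2)         # commitment term (beta omitted)
z_q = z + (z_q - z).detach()                       # straight-through estimator
loss.backward()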
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ):
snake_case__ : Tuple = parameters
snake_case__ , snake_case__ : Any = torch.chunk(__SCREAMING_SNAKE_CASE , 2 , dim=1 )
snake_case__ : Union[str, Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ : Optional[int] = deterministic
snake_case__ : Optional[int] = torch.exp(0.5 * self.logvar )
snake_case__ : Any = torch.exp(self.logvar )
if self.deterministic:
snake_case__ : List[str] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ : Dict = randn_tensor(
self.mean.shape , generator=__SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ : Optional[int] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
return self.mean
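# A small sketch of the reparameterization trick and the diagonal-Gaussian KL term
# used by the class above; the mean/logvar shapes are illustrative assumptions.
import torch

mean = torch.zeros(2, 4)
logvar = torch.full((2, 4), -1.0).clamp(-30.0, 20.0)
std = torch.exp(0.5 * logvar)

sample = mean + std * torch.randn_like(std)        # differentiable sampling

# KL(N(mean, var) || N(0, I)), summed over the feature dimension
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=1)
print(sample.shape, kl.shape)                      # torch.Size([2, 4]) torch.Size([2])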
| 38 | 0 |
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution()) | 57 |
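# An independent cross-check of solution() using only the standard library: walk the
# first day of every month in 1901-2000 and count Sundays directly with datetime.
import datetime

def count_first_of_month_sundays():
    return sum(
        datetime.date(year, month, 1).weekday() == 6  # Monday == 0, Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )

print(count_first_of_month_sundays())  # should match print(solution()) above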
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __snake_case :
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , image_size=3_2 , patch_size=1_6 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 3_8_4, 2_4, 2_4] , is_hybrid=True , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [9_6, 1_9_2, 3_8_4, 7_6_8],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config(self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
pass
    def test_model_common_attributes(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_depth_estimation(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )

    def test_for_semantic_segmentation(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training(self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@slow
    def test_model_from_pretrained(self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type(self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = """add"""
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_depth_estimation(self ):
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 3_8_4, 3_8_4) )
        self.assertEqual(predicted_depth.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , expected_slice , atol=1e-4 ) )
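# A tiny illustration of the slice-and-compare pattern used by the integration test
# above: check a small corner of a large output against hard-coded reference values
# with an absolute tolerance. The tensors here are made-up stand-ins.
import torch

output = torch.tensor([[1.00004, 2.00002], [2.99998, 4.00001]])
expected = torch.tensor([[1.0, 2.0], [3.0, 4.0]])

assert output.shape == expected.shape
assert torch.allclose(output, expected, atol=1e-4)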
| 38 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCAmelCase :
"""simple docstring"""
@property
    def dummy_input(self ):
        '''simple docstring'''
        return self.get_dummy_input()
@property
    def output_shape(self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
    def get_dummy_input(
        self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3_2
        sizes = (3_2, 3_2)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"""hidden_states""": hidden_states}
        if include_temb:
            temb_channels = 1_2_8
            dummy_input["""temb"""] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            res_generator = torch.manual_seed(1 )
            dummy_input["""res_hidden_states_tuple"""] = (randn_tensor(shape , generator=res_generator , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["""encoder_hidden_states"""] = floats_tensor((batch_size, 3_2, 3_2) ).to(torch_device )
        if include_skip_sample:
            dummy_input["""skip_sample"""] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common(self ):
        '''simple docstring'''
        init_dict = {
            """in_channels""": 3_2,
            """out_channels""": 3_2,
            """temb_channels""": 1_2_8,
        }
        if self.block_type == "up":
            init_dict["""prev_output_channel"""] = 3_2
        if self.block_type == "mid":
            init_dict.pop("""out_channels""" )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self , expected_slice ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
    def test_training(self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , Tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
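# A minimal sketch of the seeded-generator pattern these tests rely on for
# reproducible inputs: two draws from identically seeded generators are equal.
import torch

a = torch.randn((4, 32, 32, 32), generator=torch.manual_seed(0))
b = torch.randn((4, 32, 32, 32), generator=torch.manual_seed(0))
assert torch.equal(a, b)  # identical tensors, so hard-coded expected slices stay stable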
| 58 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str ) -> None:
    '''simple docstring'''
    iam_client = boto3.client("""iam""" )
    sagemaker_trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def _get_iam_role_arn(role_name: str ) -> str:
    '''simple docstring'''
    iam_client = boto3.client("""iam""" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input() -> SageMakerConfig:
    '''simple docstring'''
    credentials_configuration = _ask_options(
        """How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , int , )
    aws_profile = None
if credentials_configuration == 0:
        aws_profile = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
        aws_access_key_id = _ask_field("""AWS Access Key ID: """ )
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("""AWS Secret Access Key: """ )
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("""Enter your IAM role name: """ )
    else:
        iam_role_name = """accelerate_sagemaker_execution_role"""
        print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        """Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("""Enter your Docker image: """ , lambda __magic_name__ : str(__magic_name__ ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
    is_sagemaker_metrics_enabled = _ask_field(
        """Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda __magic_name__ : str(__magic_name__ ).lower() , )
    distributed_type = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
    dynamo_config = {}
    use_dynamo = _ask_field(
        """Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    if use_dynamo:
        prefix = """dynamo_"""
        dynamo_config[prefix + """backend"""] = _ask_options(
            """Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        use_custom_options = _ask_field(
            """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
        if use_custom_options:
            dynamo_config[prefix + """mode"""] = _ask_options(
                """Which mode do you want to use?""" , TORCH_DYNAMO_MODES , lambda __magic_name__ : TORCH_DYNAMO_MODES[int(__magic_name__ )] , default="""default""" , )
            dynamo_config[prefix + """use_fullgraph"""] = _ask_field(
                """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
            dynamo_config[prefix + """use_dynamic"""] = _ask_field(
                """Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=False , error_message="""Please enter yes or no.""" , )
    ec2_instance_query = """Which EC2 instance type you want to use for your training?"""
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query , SAGEMAKER_PARALLEL_EC2_INSTANCES , lambda __magic_name__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__magic_name__ )] )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query , lambda __magic_name__ : str(__magic_name__ ).lower() , default="""ml.p3.2xlarge""" )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            """How many machines do you want use? [1]: """ , int , default=1 , )
    mixed_precision = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
        image_uri=docker_image , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=distributed_type , use_cpu=False , dynamo_config=dynamo_config , ec2_instance_type=ec2_instance_type , profile=aws_profile , region=aws_region , iam_role_name=iam_role_name , mixed_precision=mixed_precision , num_machines=num_machines , sagemaker_inputs_file=sagemaker_inputs_file , sagemaker_metrics_file=sagemaker_metrics_file , )
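# A hedged usage sketch of the helpers above (assumes the boto3 package is installed
# and AWS credentials are configured locally): fetching an existing role's ARN only
# needs an IAM client and get_role.
import boto3

def get_role_arn(role_name: str) -> str:
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]

# arn = get_role_arn("accelerate_sagemaker_execution_role")  # requires valid credentials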
| 38 | 0 |
import inspect
import unittest
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_diffusers_import(self ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
    def test_backend_registration(self ):
        '''simple docstring'''
        import diffusers
        from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
assert backend in deps, F"""{backend} is not in the deps table!"""
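# A small illustration of the introspection pattern used above: list the classes a
# module defines with inspect.getmembers and a predicate. json is just a stand-in
# module to introspect; any importable module works.
import inspect
import json

classes = inspect.getmembers(json, inspect.isclass)
print([name for name, _ in classes])  # e.g. ['JSONDecodeError', 'JSONDecoder', 'JSONEncoder']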
| 59 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
except AttributeError:
pass
            data_frame.loc[len(data_frame.index)] = [
                product_title,
                product_link,
                product_price,
                product_rating,
                product_mrp,
                discount,
            ]
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
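# A self-contained parsing sketch (assumes beautifulsoup4 is installed) of the
# find_all(..., attrs={...}) pattern used above, run on an inline HTML snippet
# instead of a live Amazon page.
from bs4 import BeautifulSoup

html = """
<div class="s-result-item" data-component-type="s-search-result">
  <h2><a href="/item">Sample laptop</a></h2>
  <span class="a-offscreen">₹49,999</span>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
for item in soup.find_all(
    "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}
):
    print(item.h2.text.strip(), item.find("span", attrs={"class": "a-offscreen"}).text)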
| 38 | 0 |
def method_2(boundary, steps):
    # "extended trapezoidal rule"
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f'''y = {y}''' )
if __name__ == "__main__":
main()
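# Quick numeric sanity check for the trapezoidal rule above: the exact value of the
# integral of x^2 over [0, 1] is 1/3, and the error should shrink as steps grow.
if __name__ == "__main__":
    for steps in (10.0, 100.0, 1000.0):
        approx = method_2([0.0, 1.0], steps)
        print(steps, approx, abs(approx - 1 / 3))  # error drops roughly as 1/steps**2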
| 60 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LongformerTokenizer
lowerCamelCase__ = True
lowerCamelCase__ = LongformerTokenizerFast
lowerCamelCase__ = True
    def setUp(self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def __UpperCamelCase ( self ):
        tokenizer = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self ):
        tokenizer = self.get_tokenizer()
        sequence = """Encode this sequence."""
        space_encoding = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = """Encode <mask> sequence"""
        sequence_nospace = """Encode <mask>sequence"""
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : str = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self ):
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""trim_offsets"""] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
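# A toy byte-pair-encoding merge sketch matching the vocab/merges fixtures above:
# repeatedly apply the highest-priority adjacent merge, so "l o w e r" becomes
# ["low", "er"] under the merges ("e","r"), ("l","o"), ("lo","w"). Illustrative only.
def bpe(tokens, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    while True:
        pairs = [(tokens[i], tokens[i + 1]) for i in range(len(tokens) - 1)]
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            return tokens
        best = min(candidates, key=ranks.__getitem__)
        i = pairs.index(best)
        tokens = tokens[:i] + [best[0] + best[1]] + tokens[i + 2:]

print(bpe(list("lower"), [("e", "r"), ("l", "o"), ("lo", "w")]))  # ['low', 'er']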
| 38 | 0 |