"""K-Means clustering implemented with the TensorFlow 1.x graph API.

The graph/session constructs used here were removed from TensorFlow 2, so the
v1 compatibility layer is imported below.
"""
from random import shuffle

import tensorflow.compat.v1 as tf
from numpy import array

tf.disable_v2_behavior()


def tf_k_means_cluster(vectors, noofclusters):
    """Cluster an n*k 2-D NumPy array of n vectors of dimensionality k into
    `noofclusters` groups, returning the final centroids and assignments."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ## CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ## First lets ensure we have a Variable vector for each centroid,
        ## initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ## These nodes will assign the centroid Variables the appropriate
        ## values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ## Variables for cluster assignments of individual vectors (initialized
        ## to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ## These nodes will assign an assignment Variable the appropriate
        ## value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ## Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ## Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ## This node will figure out which cluster to assign a vector to,
        ## based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ## INITIALIZING STATE VARIABLES

        ## This will help initialization of all Variables defined with respect
        ## to the graph. The Variable-initializer should be defined after
        ## all the Variables have been constructed, so that each of them
        ## will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ## CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ## EXPECTATION STEP
            ## Based on the centroid locations till last iteration, compute
            ## the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ## MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
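

# Illustrative usage sketch (not part of the upstream algorithm file): cluster
# four hypothetical 2-D points into two groups. The sample data below is made
# up for demonstration purposes.
if __name__ == "__main__":
    sample_points = array([[1.0, 1.0], [1.5, 1.2], [8.0, 8.0], [8.2, 7.9]])
    final_centroids, final_assignments = tf_k_means_cluster(sample_points, 2)
    print("centroids:", final_centroids)
    print("assignments:", final_assignments)
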

"""Project Euler problem 31: count the ways to make 200 pence from standard
British coins (https://projecteuler.net/problem=31)."""


def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
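
# Worked example of the table update, with pence=5 and coins limited to
# [1, 2, 5]: after coin 1 number_of_ways is [1, 1, 1, 1, 1, 1]; after coin 2
# it is [1, 1, 2, 2, 3, 3]; after coin 5 the final entry becomes 4
# (5 = 1+1+1+1+1 = 1+1+1+2 = 1+2+2 = 5).
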

import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
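
# Minimal usage sketch (illustrative; downloading the checkpoint requires
# network access):
#
#   tokenizer = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#   )
#   ids = tokenizer("def add(a,b):NEW_LINE_INDENTreturn a+b").input_ids
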

from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for _ in range(6)]
        cpu_right_col_base = [mem.copy() for _ in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for _ in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for _ in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for _ in range(6)]
        disk_right_col_base = [meta_mem.copy() for _ in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a_c, run_time=0.5),
                MoveToTarget(input, run_time=0.5),
                FadeIn(a_c, run_time=0.5),
                lag_ratio=0.2,
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(
            "Inference on a model too large for GPU memory\nis successfully completed.",
            font_size=24,
        )
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
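
# To render this scene with the standard manim CLI (file name illustrative):
#   manim -pql stage_5.py Stage5
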

import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
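
# These cases are typically run with pytest from a diffusers checkout
# (path illustrative): pytest tests/models/test_unet_2d_blocks.py -q
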

from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc subtending `angle` degrees at radius `radius`."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
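
# Worked example: a 90-degree arc on a circle of radius 10 is a quarter of the
# circumference, 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.71.
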

def merge_sort(collection):
    """Sort a list with the classic top-down merge sort and return a new list."""

    def merge(left, right) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
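
# Example of the inner merge step: merge([3, 5], [1, 8]) repeatedly compares
# the heads of the two lists and pops the smaller one, yielding [1, 3, 5, 8].
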

"""Project Euler problem 7: find the 10001st prime number
(https://projecteuler.net/problem=7)."""
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, since 6k, 6k + 2 and
    # 6k + 4 are even and 6k + 3 is divisible by 3.
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
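
# Worked example of the 6k +/- 1 trial division: for number = 97,
# int(sqrt(97) + 1) = 10, so the loop only tests i = 5, i.e. divisors 5 and 7;
# neither divides 97, so is_prime(97) returns True.
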

from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two adjacent sorted runs input_list[low:mid] and
    input_list[mid:high + 1] back into input_list."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
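
# Trace for [5, 3, 8, 1]: with p = 2 the pairs are merged to [3, 5, 1, 8];
# since p * 2 = 4 >= len(input_list), the final merge combines the two halves
# into [1, 3, 5, 8].
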

import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        """kp.calc_profit takes the required arguments (profit, weight,
        max_weight) and returns the maximum profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Raises ValueError for any negative max_weight value."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Raises ValueError for any negative weight value."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Raises ValueError for any negative profit value."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Raises ValueError for a null max_weight."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Raises IndexError when the profit and weight lists differ in length."""
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
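
# Sanity check on the numbers in test_sorted: the item weights sum to
# 2+4+6+8+10+12 = 42 <= 100, so every item fits and the expected profit is the
# full 10+20+30+40+50+60 = 210.
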

from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
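
# Minimal smoke-test sketch (illustrative; run it from a separate script, since
# this module uses relative imports):
#
#   import torch
#   from diffusers.models.attention import BasicTransformerBlock
#
#   block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
#   sample = torch.randn(2, 16, 32)   # (batch, sequence, dim)
#   print(block(sample).shape)        # torch.Size([2, 16, 32])
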

import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wrap the lambdas of an LR scheduler in picklable callables, so that the
    scheduler state can be saved and reloaded."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
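
# Sketch of how these schedules are used in a training loop (names
# illustrative): step the scheduler once per optimizer step.
#
#   optimizer = AdamW(model.parameters(), lr=5e-5)
#   scheduler = get_linear_schedule_with_warmup(
#       optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for batch in dataloader:
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       scheduler.step()
#       optimizer.zero_grad()
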
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : List[str] = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : Dict ):
a__ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a__ = 1_2_8
elif "12-12" in model_name:
a__ = 1_2
a__ = 1_2
elif "14-14" in model_name:
a__ = 1_4
a__ = 1_4
elif "16-16" in model_name:
a__ = 1_6
a__ = 1_6
else:
raise ValueError('Model not supported' )
a__ = '''huggingface/label-files'''
if "speech-commands" in model_name:
a__ = 3_5
a__ = '''speech-commands-v2-id2label.json'''
else:
a__ = 5_2_7
a__ = '''audioset-id2label.json'''
a__ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
a__ = {int(a_ ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( __lowerCAmelCase : List[str] ):
if "module.v" in name:
a__ = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
a__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
a__ = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
a__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
a__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
a__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
a__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a__ = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a__ = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
a__ = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
a__ = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ):
for key in orig_state_dict.copy().keys():
a__ = orig_state_dict.pop(a_ )
if "qkv" in key:
a__ = key.split('.' )
a__ = int(key_split[3] )
a__ = config.hidden_size
if "weight" in key:
a__ = val[:dim, :]
a__ = val[dim : dim * 2, :]
a__ = val[-dim:, :]
else:
a__ = val[:dim]
a__ = val[dim : dim * 2]
a__ = val[-dim:]
else:
a__ = val
return orig_state_dict
def __lowercase ( __lowerCAmelCase : str ):
a__ = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(a_ , a_ )
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=False ):
a__ = get_audio_spectrogram_transformer_config(a_ )
a__ = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
a__ = model_name_to_url[model_name]
a__ = torch.hub.load_state_dict_from_url(a_ , map_location='cpu' )
# remove some keys
remove_keys(a_ )
# rename some keys
a__ = convert_state_dict(a_ , a_ )
# load 🤗 model
a__ = ASTForAudioClassification(a_ )
model.eval()
model.load_state_dict(a_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a__ = -4.2_677_393 if '''speech-commands''' not in model_name else -6.845_978
a__ = 4.5_689_974 if '''speech-commands''' not in model_name else 5.5_654_526
a__ = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8
a__ = ASTFeatureExtractor(mean=a_ , std=a_ , max_length=a_ )
if "speech-commands" in model_name:
a__ = load_dataset('speech_commands' , 'v0.02' , split='validation' )
a__ = dataset[0]['''audio''']['''array''']
else:
a__ = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
a__ = torchaudio.load(a_ )
a__ = waveform.squeeze().numpy()
a__ = feature_extractor(a_ , sampling_rate=1_6_0_0_0 , return_tensors='pt' )
# forward pass
a__ = model(**a_ )
a__ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a__ = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a__ = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a__ = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a__ = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a__ = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a__ = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a__ = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
a__ = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , a_ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(a_ ).mkdir(exist_ok=a_ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(a_ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(a_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case : Tuple = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
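# Editor's note: the fused-QKV split in convert_state_dict above (val[:dim],
# val[dim : dim * 2], val[-dim:]) is a common checkpoint-conversion pattern.
# A minimal self-contained sketch of the same idea; the function and variable
# names below are illustrative, not from the original script:
import torch

def split_qkv(fused_weight: torch.Tensor, dim: int):
    """Split a stacked (3 * dim, dim) QKV weight into query/key/value blocks."""
    query = fused_weight[:dim, :]
    key = fused_weight[dim : dim * 2, :]
    value = fused_weight[-dim:, :]
    return query, key, value

fused = torch.arange(3 * 4 * 4, dtype=torch.float32).reshape(12, 4)
q, k, v = split_qkv(fused, dim=4)
assert q.shape == k.shape == v.shape == (4, 4)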
| 718 |
from __future__ import annotations
def longest_subsequence ( array : list[int] ) -> list[int]: # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq : list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
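# Editor's note: a quick sanity check for the recursive routine above; the
# expected value matches the standard doctest for this algorithm.
assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]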
| 657 | 0 |
def exchange_sort ( numbers : list[int] ) -> list[int]:
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[j] , numbers[i] = numbers[i], numbers[j]
    return numbers
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
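# Editor's note: exchange sort makes O(n^2) comparisons regardless of input
# order; a quick check of the corrected routine above:
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]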
| 719 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
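# Editor's note: the resize above rounds height and width DOWN to the nearest
# multiple of `size_divisor` before resizing. A dependency-free sketch of that
# rounding rule (names are illustrative):
def round_down_to_multiple(value: int, divisor: int) -> int:
    return value // divisor * divisor

assert round_down_to_multiple(480, 32) == 480
assert round_down_to_multiple(500, 32) == 480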
| 657 | 0 |
def solution ( n : int = 2_0_0_0_0_0_0 ):
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
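# Editor's note: the sieve above marks composites from i * i upward in steps of
# i; a small check of the corrected solution() (primes below 10: 2 + 3 + 5 + 7):
assert solution(10) == 17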
| 720 |
def print_pascal_triangle ( num_rows : int ):
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def generate_pascal_triangle ( num_rows : int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
return triangle
def populate_current_row ( triangle : list[list[int]] , current_row_idx : int ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
return current_row
def calculate_current_element ( triangle : list[list[int]] , current_row : list[int] , current_row_idx : int , current_col_idx : int , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized ( num_rows : int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
return result
def benchmark ( ):
from collections.abc import Callable
from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
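# Editor's note: every interior entry obeys Pascal's rule
# C(n, k) = C(n - 1, k - 1) + C(n - 1, k); a cross-check against math.comb:
import math
for n, row in enumerate(generate_pascal_triangle(6)):
    assert row == [math.comb(n, k) for k in range(n + 1)]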
| 657 | 0 |
import json
import sys
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ):
with open(__UpperCamelCase , encoding='utf-8' ) as f:
a__ = json.load(__UpperCamelCase )
a__ = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(__UpperCamelCase ):
a__ = results[benchmark_name]
a__ = benchmark_name.split('/' )[-1]
output_md.append(F'### Benchmark: {benchmark_file_name}' )
a__ = """| metric |"""
a__ = """|--------|"""
a__ = """| new / old (diff) |"""
for metric_name in sorted(__UpperCamelCase ):
a__ = benchmark_res[metric_name]
a__ = metric_vals["""new"""]
a__ = metric_vals.get('old' , __UpperCamelCase )
a__ = metric_vals.get('diff' , __UpperCamelCase )
a__ = F' {new_val:f}' if isinstance(__UpperCamelCase , (int, float) ) else """None"""
if old_val is not None:
val_str += F' / {old_val:f}' if isinstance(__UpperCamelCase , (int, float) ) else "None"
if dif_val is not None:
val_str += F' ({dif_val:f})' if isinstance(__UpperCamelCase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(__UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(__UpperCamelCase ) )
if __name__ == "__main__":
snake_case : Dict = sys.argv[1]
snake_case : str = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
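# Editor's note: a minimal illustration of the input shape the converter above
# expects (the file and metric names here are hypothetical):
example_results = {
    "benchmarks/infer.json": {"latency_ms": {"new": 12.3, "old": 14.0, "diff": -1.7}}
}
# Each benchmark becomes a "### Benchmark: ..." heading followed by a metric
# table with one "new / old (diff)" cell per metric.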
| 721 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand ( ):
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def generate_random_hands ( number_of_hands : int = 1_0_0 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    file_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(file_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6
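# Editor's note: the expected-result lookup in generate_random_hand uses the
# index trick (play >= oppo) + (play > oppo), which maps a losing hand to 0,
# a tie to 1 and a winning hand to 2:
for play, oppo, expected in [(1, 2, 'Loss'), (2, 2, 'Tie'), (3, 2, 'Win')]:
    assert ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] == expected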
| 657 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowercase ( __lowerCAmelCase : Optional[int] ):
a__ = 3_8_4
if "tiny" in model_name:
a__ = [3, 3, 9, 3]
a__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
a__ = [3, 3, 2_7, 3]
a__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
a__ = [3, 3, 2_7, 3]
a__ = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
a__ = 5_1_2
if "large" in model_name:
a__ = [3, 3, 2_7, 3]
a__ = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
a__ = 7_6_8
if "xlarge" in model_name:
a__ = [3, 3, 2_7, 3]
a__ = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
a__ = 1_0_2_4
# set label information
a__ = 1_5_0
a__ = 'huggingface/label-files'
a__ = 'ade20k-id2label.json'
a__ = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
a__ = {int(snake_case__ ): v for k, v in idalabel.items()}
a__ = {v: k for k, v in idalabel.items()}
a__ = ConvNextConfig(
depths=snake_case__ , hidden_sizes=snake_case__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
a__ = UperNetConfig(
backbone_config=snake_case__ , auxiliary_in_channels=snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , )
return config
def __lowercase ( __lowerCAmelCase : Any ):
a__ = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
a__ = dct.pop(snake_case__ )
a__ = val
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ):
a__ = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
a__ = model_name_to_url[model_name]
a__ = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['state_dict']
a__ = get_upernet_config(snake_case__ )
a__ = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
a__ = state_dict.pop(snake_case__ )
if "bn" in key:
a__ = key.replace('bn' , 'batch_norm' )
a__ = val
# rename keys
a__ = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
a__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
a__ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
a__ = SegformerImageProcessor()
a__ = processor(snake_case__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
a__ = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
a__ = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
a__ = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
a__ = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
a__ = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
a__ = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case : Any = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
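# Editor's note: the rename pass above is a plain (old_key -> new_key) remapping
# over the state dict; a minimal generic sketch of the same pattern (names are
# illustrative):
def rename_state_dict_keys(state_dict: dict, rename_pairs: list) -> dict:
    for src, dest in rename_pairs:
        if src in state_dict:
            state_dict[dest] = state_dict.pop(src)
    return state_dict

assert rename_state_dict_keys({"bn.weight": 1}, [("bn.weight", "batch_norm.weight")]) == {
    "batch_norm.weight": 1
}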
| 700 |
def hexagonal_numbers ( length : int ):
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
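# Editor's note: the n-th hexagonal number is h_n = n * (2 * n - 1), so the
# sequence starts 0, 1, 6, 15, 28, 45 (the list above begins at n = 0):
assert hexagonal_numbers(length=6) == [0, 1, 6, 15, 28, 45]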
| 657 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch :
    def __init__( self ,text :str ,pattern :str ) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self ,char :str ) -> int:
for i in range(self.patLen - 1 ,-1 ,-1 ):
if char == self.pattern[i]:
return i
return -1
    def mismatch_in_text( self ,current_pos :int ) -> int:
for i in range(self.patLen - 1 ,-1 ,-1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
    def bad_character_heuristic( self ) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                ) # shifting index lgtm [py/multiple-definition]
return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
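# Editor's note: the bad-character heuristic aligns the rightmost occurrence of
# the mismatched text character with the mismatch position; for the demo above
# the pattern 'AB' occurs in 'ABAABA' at indices 0 and 3:
assert BoyerMooreSearch('ABAABA', 'AB').bad_character_heuristic() == [0, 3]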
| 701 |
def calc_profit ( profit : list , weight : list , max_weight : int ):
    if len(profit ) != len(weight ):
        raise ValueError('The length of profit and weight must be the same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.' )
    if any(p < 0 for p in profit ):
        raise ValueError('Profit can not be negative.' )
    if any(w < 0 for w in weight ):
        raise ValueError('Weight can not be negative.' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
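# Editor's note: a worked check of the greedy rule above: with profits
# [60, 100, 120], weights [10, 20, 30] and capacity 50, the two best
# profit/weight items fit whole (60 + 100) and 20/30 of the last adds 80:
import math
assert math.isclose(calc_profit([60, 100, 120], [10, 20, 30], 50), 240.0)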
| 657 | 0 |
import os
def solution ( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
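# Editor's note: the accumulation above is the standard maximum-path DP; a
# self-contained check on the 4-row example triangle from Project Euler 18
# (best path 3 + 7 + 4 + 9 = 23):
def max_path_total(rows: list) -> int:
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j != len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])

assert max_path_total([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23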
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
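# Editor's note: a minimal sketch of the lazy-import idea used above, written
# with a plain module-level __getattr__ (PEP 562) instead of transformers'
# _LazyModule helper; the mapping below is illustrative:
import importlib

_lazy_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)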
| 657 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case_ (UpperCamelCase_ ):
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__A ,'num_attention_heads' ) )
class snake_case_ :
def __init__( self :Any ,__snake_case :Optional[int] ,__snake_case :Dict=13 ,__snake_case :Union[str, Any]=64 ,__snake_case :int=3 ,__snake_case :List[str]=3 ,__snake_case :Optional[Any]=2 ,__snake_case :Dict=1 ,__snake_case :Optional[int]=16 ,__snake_case :Any=[1_28, 2_56, 3_84] ,__snake_case :List[str]=[4, 6, 8] ,__snake_case :Optional[Any]=[2, 3, 4] ,__snake_case :str=[16, 16, 16] ,__snake_case :Dict=0 ,__snake_case :Dict=[2, 2, 2] ,__snake_case :Dict=[2, 2, 2] ,__snake_case :Dict=0.02 ,__snake_case :Dict=True ,__snake_case :Dict=True ,__snake_case :str=2 ,) -> Optional[Any]:
a__ = parent
a__ = batch_size
a__ = image_size
a__ = num_channels
a__ = kernel_size
a__ = stride
a__ = padding
a__ = hidden_sizes
a__ = num_attention_heads
a__ = depths
a__ = key_dim
a__ = drop_path_rate
a__ = patch_size
a__ = attention_ratio
a__ = mlp_ratio
a__ = initializer_range
a__ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
a__ = is_training
a__ = use_labels
a__ = num_labels
a__ = initializer_range
def lowerCamelCase__( self :Tuple ) -> Any:
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
return LevitConfig(
image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :List[str] ,__snake_case :List[str] ) -> Tuple:
a__ = LevitModel(config=__A )
model.to(__A )
model.eval()
a__ = model(__A )
a__ = (self.image_size, self.image_size)
a__ = image_size[0], image_size[1]
for _ in range(4 ):
a__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
a__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)
def lowerCamelCase__( self :Dict ,__snake_case :Tuple ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ) -> Optional[int]:
a__ = self.num_labels
a__ = LevitForImageClassification(__A )
model.to(__A )
model.eval()
a__ = model(__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
a__ = self.prepare_config_and_inputs()
a__ = config_and_inputs
a__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = False
def lowerCamelCase__( self :Any ) -> Tuple:
a__ = LevitModelTester(self )
a__ = ConfigTester(self ,config_class=__A ,has_text_modality=__A ,hidden_size=37 )
def lowerCamelCase__( self :List[str] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__( self :Optional[int] ) -> Dict:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def lowerCamelCase__( self :str ) -> int:
pass
@unittest.skip(reason='Levit does not output attentions' )
def lowerCamelCase__( self :Optional[int] ) -> Dict:
pass
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__A )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__A )
def lowerCamelCase__( self :Tuple ) -> Any:
def check_hidden_states_output(__snake_case :Any ,__snake_case :Dict ,__snake_case :Any ):
a__ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__A ,__A ) )
a__ = outputs.hidden_states
a__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) ,__A )
a__ = (self.model_tester.image_size, self.model_tester.image_size)
a__ = image_size[0], image_size[1]
for _ in range(4 ):
a__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
a__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__A ,__A ,__A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
pass
def lowerCamelCase__( self :List[str] ,__snake_case :str ,__snake_case :Optional[int] ,__snake_case :Optional[int]=False ) -> str:
a__ = super()._prepare_for_class(__A ,__A ,return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase__( self :List[str] ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def lowerCamelCase__( self :List[Any] ) -> List[str]:
if not self.model_tester.is_training:
return
a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
a__ = model_class(__A )
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A ,__A ,return_labels=__A )
a__ = model(**__A ).loss
loss.backward()
def lowerCamelCase__( self :Tuple ) -> str:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ = False
a__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
a__ = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A ,__A ,return_labels=__A )
a__ = model(**__A ).loss
loss.backward()
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
a__ = problem_type["title"]
a__ = problem_type["num_labels"]
a__ = model_class(__A )
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A ,__A ,return_labels=__A )
if problem_type["num_labels"] > 1:
a__ = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
a__ = inputs["labels"].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
a__ = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowerCamelCase__( self :List[str] ) -> Any:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __lowercase ( ):
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Dict ) -> str:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__A ,return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
a__ = model(**__A )
# verify the logits
a__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,__A )
a__ = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__A ,atol=1E-4 ) )
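# Editor's note: the repeated floor(((size + 2 * padding - kernel) / stride) + 1)
# expressions above are the standard convolution output-size formula; e.g. a
# 224-pixel input with kernel 3, stride 2 and padding 1 halves to 112:
from math import floor
assert floor(((224 + 2 * 1 - 3) / 2) + 1) == 112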
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 0 |
from jiwer import compute_measures
import datasets
snake_case : Optional[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
snake_case : Union[str, Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
snake_case : Optional[Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] ,)
    def _compute( self ,predictions=None ,references=None ,concatenate_texts=False ) -> float:
        if concatenate_texts:
            return compute_measures(references ,predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions ,references ):
                # jiwer expects the ground truth first, then the hypothesis
                measures = compute_measures(reference ,prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
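

# --- Added usage sketch (not from the original file): exercises both branches of
# `_compute` above. The example strings are illustrative, and loading via the
# `datasets` registry is an assumption about the runtime environment.
if __name__ == "__main__":
    wer_metric = datasets.load_metric('wer' )
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    # the two results can differ slightly: alignment is computed per sentence pair
    # in the default path, and over one concatenated pair with concatenate_texts=True
    print(wer_metric.compute(predictions=predictions ,references=references ) )
    print(wer_metric.compute(predictions=predictions ,references=references ,concatenate_texts=True ) )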
| 704 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (SchedulerMixin ,ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self ,num_train_timesteps: int = 10_00 ,trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps( self ,num_inference_steps: int ,device: Union[str, torch.device] = None ) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )

        self.ets = []

    def step( self ,model_output: torch.FloatTensor ,timestep: int ,sample: torch.FloatTensor ,return_dict: bool = True ,) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )

        # linear multistep (Adams-Bashforth style) combination of the stored ets values
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self ,sample: torch.FloatTensor ,*args ,**kwargs ) -> torch.FloatTensor:
        return sample

    def _get_prev_sample( self ,sample ,timestep_index ,prev_timestep_index ,ets ) -> torch.FloatTensor:
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha ,1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
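

# --- Added smoke-test sketch (not from the original file): drives the scheduler with a
# dummy all-zero "denoiser" output just to exercise the multistep bookkeeping above;
# a real pipeline would plug in a diffusion model's noise prediction instead.
if __name__ == "__main__":
    scheduler = snake_case_(num_train_timesteps=10_00 )
    scheduler.set_timesteps(50 )
    sample = torch.randn(1 ,3 ,8 ,8 )
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample )  # stand-in for the model's prediction
        sample = scheduler.step(model_output ,t ,sample ).prev_sample
    print(sample.shape )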
| 657 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_0_0_0_0_0_0 ) -> int:
    # an a x b grid contains T(a) * T(b) rectangles, where T(n) is the n-th triangle number
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 1_4  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
    adjacency = defaultdict(list )
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost] )
        adjacency[node_b].append([node_a, cost] )

    result = mst(adjacency )

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name ,checkpoint_name ,dump_path ,force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers ,tokenizer_name + 'Fast' )}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}' )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint ,force_download=force_download )

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path ,checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full ,checkpoint_prefix_name )
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )

            file_names = tokenizer.save_pretrained(
                dump_path_full ,legacy_format=False ,filename_prefix=checkpoint_prefix_name )
            logger.info(f'=> File names {file_names}' )

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(f'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 0 |
def power(base: int ,exponent: int ) -> float:
    return base * power(base ,exponent - 1 ) if exponent else 1
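

# --- Added alternative sketch (not from the original file): iterative exponentiation
# by squaring, O(log n) multiplications and no recursion-depth limit; assumes a
# non-negative integer exponent.
def fast_power(base: int ,exponent: int ) -> int:
    result = 1
    while exponent:
        if exponent & 1:  # odd exponent: fold the current base power into the result
            result *= base
        base *= base  # square the base for the next binary digit
        exponent >>= 1
    return result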
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
        result = 1 / result
print(f"""{base} to the power of {exponent} is {result}""")
| 707 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            # a lamina uses outer_width**2 - hole_width**2 tiles, which must not exceed limit,
            # so the hole width is at least ceil(sqrt(outer_width**2 - limit))
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # the hole and the outer square must share the same parity
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    def __init__( self ,base_distribution: Distribution ,loc=None ,scale=None ,event_dim=0 ) -> None:
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution ,[AffineTransform(loc=self.loc ,scale=self.scale ,event_dim=event_dim )] )

    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev( self ):
        return self.variance.sqrt()


class ParameterProjection(nn.Module ):
    def __init__( self ,in_features: int ,args_dim: Dict[str, int] ,domain_map: Callable[..., Tuple[torch.Tensor]] ,**kwargs ) -> None:
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # one linear head per distribution parameter
        self.proj = nn.ModuleList([nn.Linear(in_features ,dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def forward( self ,x: torch.Tensor ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )


class LambdaLayer(nn.Module ):
    def __init__( self ,function ) -> None:
        super().__init__()
        self.function = function

    def forward( self ,x ,*args ):
        return self.function(x ,*args )


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__( self ,dim: int = 1 ) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution( self ,distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) ,1 )

    def distribution( self ,distr_args ,loc: Optional[torch.Tensor] = None ,scale: Optional[torch.Tensor] = None ,) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr ,loc=loc ,scale=scale ,event_dim=self.event_dim )

    @property
    def event_shape( self ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )

    @property
    def value_in_support( self ) -> float:
        return 0.0

    def get_parameter_projection( self ,in_features: int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features ,args_dim=self.args_dim ,domain_map=LambdaLayer(self.domain_map ) ,)

    def domain_map( self ,*args: torch.Tensor ):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor ) -> torch.Tensor:
        # smooth approximation of ReLU that keeps parameters strictly positive
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0


class StudentTOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {'df': 1, 'loc': 1, 'scale': 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls ,df: torch.Tensor ,loc: torch.Tensor ,scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )


class NormalOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {'loc': 1, 'scale': 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls ,loc: torch.Tensor ,scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )


class NegativeBinomialOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {'total_count': 1, 'logits': 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls ,total_count: torch.Tensor ,logits: torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution( self ,distr_args ):
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count ,logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count ,logits=logits ) ,1 )

    def distribution( self ,distr_args ,loc: Optional[torch.Tensor] = None ,scale: Optional[torch.Tensor] = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
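

# --- Added usage sketch (not from the original file; shapes are illustrative only):
# project network features to distribution parameters, build the distribution, sample.
if __name__ == "__main__":
    distr_output = StudentTOutput(dim=1 )
    projection = distr_output.get_parameter_projection(in_features=32 )
    features = torch.randn(4 ,32 )  # (batch, in_features)
    distr_args = projection(features )  # tuple of (df, loc, scale) tensors
    distribution = distr_output.distribution(distr_args )
    print(distribution.sample().shape )  # torch.Size([4])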
| 708 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute( self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ):
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 657 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float ,y: float ,max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance ,1 ,1 ) )


def get_image(image_width: int = 800 ,image_height: int = 600 ,figure_center_x: float = -0.6 ,figure_center_y: float = 0 ,figure_width: float = 3.2 ,max_step: int = 50 ,use_distance_color_coding: bool = True ,):
    img = Image.new('RGB' ,(image_width, image_height) )
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x ,figure_y ,max_step )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (Trainer ):
    def __init__( self ,config=None ,data_args=None ,*args ,**kwargs ) -> None:
        super().__init__(*args ,**kwargs )

        if config is None:
            assert isinstance(self.model ,PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config ,FSMTConfig ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding..' )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self ,num_training_steps: int ) -> None:
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters ,optim=optimizer_cls ,**optimizer_kwargs ,)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters ,**optimizer_kwargs )

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler( self ,num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self ,model ,inputs ,labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs ,use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs ,labels=labels ,use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs ,use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits ,dim=-1 )
            loss , _ = self.loss_fn(lprobs ,labels ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self ,model ,inputs ):
        labels = inputs.pop('labels' )
        loss , _ = self._compute_loss(model ,inputs ,labels )
        return loss
    def prediction_step( self ,model: nn.Module ,inputs: Dict[str, Union[torch.Tensor, Any]] ,prediction_loss_only: bool ,ignore_keys: Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**gen_kwargs ,)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens ,gen_kwargs['max_length'] )

        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model ,inputs ,labels )

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels ,gen_kwargs['max_length'] )

        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self ,tensor ,max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}' )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class snake_case_ (PretrainedConfig ):
    model_type = 'roc_bert'

    def __init__( self ,vocab_size=3_05_22 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,use_cache=True ,pad_token_id=0 ,position_embedding_type="absolute" ,classifier_dropout=None ,enable_pronunciation=True ,enable_shape=True ,pronunciation_embed_dim=7_68 ,pronunciation_vocab_size=9_10 ,shape_embed_dim=5_12 ,shape_vocab_size=2_48_58 ,concat_input=True ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
| 710 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt(prompt_or_repo_id ,agent_name ,mode="run" ):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s' ,prompt_or_repo_id ) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id ,PROMPT_FILES[mode] ,repo_type='dataset' ,user_agent={'agent': agent_name} )
    with open(prompt_file ,'r' ,encoding='utf-8' ) as f:
return f.read()
| 657 | 0 |
from __future__ import annotations
def shear_stress(stress: float ,tangential_force: float ,area: float ,):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
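

# --- Added usage examples (illustrative values; exactly one argument is zero and
# gets solved for):
# shear_stress(stress=25, tangential_force=100, area=0)    -> ('area', 4.0)
# shear_stress(stress=0, tangential_force=1600, area=200)  -> ('stress', 8.0)
# shear_stress(stress=1000, tangential_force=0, area=1200) -> ('tangential_force', 1200000.0)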
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int ) -> str:
    if not isinstance(precision ,int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )

    # Chudnovsky algorithm: each term adds roughly 14 correct digits
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 ,num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
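    # --- Added sanity check (not from the original file): the leading digits
    # must agree with math.pi
    import math

    assert pi(10 ).startswith(str(math.pi )[:10] )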
| 657 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class snake_case_ (TokenizerTesterMixin ,unittest.TestCase ):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer( self ) -> PerceiverTokenizer:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer( self ,**kwargs ) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**kwargs )
    def get_clean_sequence( self ,tokenizer ,with_prefix_space=False ,max_length=20 ,min_length=5 ):
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] ,clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )

        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$' ,t[1] ) ,toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=False ) ,toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids ,clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt ,add_special_tokens=False )
        return output_txt, output_ids
    def test_multibytes_char( self ) -> None:
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
        self.assertEqual(encoded['input_ids'] ,encoded_ids )

        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded ,'[CLS]Unicode €.[SEP]' )

        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
        self.assertEqual(encoded['input_ids'] ,encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded ,'[CLS]e è é ê ë[SEP]' )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) ,'[CLS]e è é ê ë[SEP]' )
def lowerCamelCase__( self :Union[str, Any] ) -> str:
a__ = self.perceiver_tokenizer
a__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
a__ = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
a__ = tokenizer(UpperCamelCase__ ,padding=UpperCamelCase__ ,return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ ,UpperCamelCase__ )
if FRAMEWORK != "jax":
a__ = list(batch.input_ids.numpy()[0] )
else:
a__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
self.assertEqual((2, 38) ,batch.input_ids.shape )
self.assertEqual((2, 38) ,batch.attention_mask.shape )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = self.perceiver_tokenizer
a__ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
a__ = tokenizer(UpperCamelCase__ ,padding=UpperCamelCase__ ,return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' ,UpperCamelCase__ )
self.assertIn('attention_mask' ,UpperCamelCase__ )
self.assertNotIn('decoder_input_ids' ,UpperCamelCase__ )
self.assertNotIn('decoder_attention_mask' ,UpperCamelCase__ )
def lowerCamelCase__( self :Any ) -> Tuple:
a__ = self.perceiver_tokenizer
a__ = [
'''Summary of the text.''',
'''Another summary.''',
]
a__ = tokenizer(
text_target=UpperCamelCase__ ,max_length=32 ,padding='max_length' ,truncation=UpperCamelCase__ ,return_tensors=UpperCamelCase__ )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
def lowerCamelCase__( self :List[Any] ) -> str:
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ''' He is very happy, UNwant\u00E9d,running'''
a__ = tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
a__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
a__ = after_tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
a__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['bim', 'bambam'] )
a__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a__ = tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
a__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
a__ = after_tokenizer.encode(UpperCamelCase__ ,add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ )
self.assertIn('new_additional_special_token' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
a__ = tokenizer.__class__.from_pretrained(UpperCamelCase__ ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
a__ = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
a__ = json.load(UpperCamelCase__ )
a__ = [F'<extra_id_{i}>' for i in range(1_25 )]
a__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
a__ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCamelCase__ ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ ,UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ ,UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ = tokenizer_class.from_pretrained(
UpperCamelCase__ ,)
self.assertIn(
'an_additional_special_token' ,tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' ,lstrip=UpperCamelCase__ )]
a__ = tokenizer_class.from_pretrained(
UpperCamelCase__ ,additional_special_tokens=UpperCamelCase__ ,)
self.assertIn('a_new_additional_special_token' ,tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) ,)
def lowerCamelCase__( self :str ) -> Any:
a__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) ,'�' )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
pass
def lowerCamelCase__( self :Optional[Any] ) -> str:
pass
def lowerCamelCase__( self :List[str] ) -> Dict:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = self.get_tokenizers(fast=UpperCamelCase__ ,do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
a__ = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
a__ = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ ,UpperCamelCase__ )
| 712 |
def solution(pence: int = 2_0_0 ) -> int:
    coins = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin ,pence + 1 ,1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class snake_case_ (PretrainedConfig ):
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self ,vocab_size=12_81_12 ,max_position_embeddings=10_24 ,encoder_layers=12 ,encoder_ffn_dim=40_96 ,encoder_attention_heads=16 ,decoder_layers=12 ,decoder_ffn_dim=40_96 ,decoder_attention_heads=16 ,encoder_layerdrop=0.05 ,decoder_layerdrop=0.05 ,use_cache=True ,is_encoder_decoder=True ,activation_function="relu" ,d_model=10_24 ,dropout=0.1 ,attention_dropout=0.1 ,activation_dropout=0.0 ,init_std=0.02 ,decoder_start_token_id=2 ,scale_embedding=True ,router_bias=False ,router_dtype="float32" ,router_ignore_padding_tokens=False ,num_experts=1_28 ,expert_capacity=64 ,encoder_sparse_step=4 ,decoder_sparse_step=4 ,router_z_loss_coef=0.0_01 ,router_aux_loss_coef=0.0_01 ,second_expert_policy="all" ,normalize_router_prob_before_dropping=False ,batch_prioritized_routing=False ,moe_eval_capacity_token_fraction=1.0 ,moe_token_dropout=0.2 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,output_router_logits=False ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
| 713 |
from manim import *
class snake_case_ (Scene ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText('Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 0 |
def is_palindrome(n: int) -> bool:
    """Return True if ``n`` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return ``n`` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count the Lychrel candidates below ``limit`` (Project Euler 55): numbers
    that do not reach a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # the while loop exhausted its 50 iterations without a palindrome
            lychrel_nums.append(num)
    return len(lychrel_nums)
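# Illustrative check (not part of the original file): 349 reaches the
# palindrome 7337 after three reverse-and-add steps, so it is not a
# Lychrel candidate.
def _demo_reverse_and_add() -> None:
    n = 349
    for _ in range(3):
        n = sum_reverse(n)  # 349 -> 1292 -> 4213 -> 7337
    assert is_palindrome(n)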
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of an arc of ``angle`` degrees on a circle of the given ``radius``."""
    return 2 * pi * radius * (angle / 360)
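# Illustrative check (not part of the original file): a 90-degree arc of a
# circle of radius 10 is a quarter of the circumference, i.e. 5 * pi.
def _demo_arc_length() -> None:
    from math import isclose

    assert isclose(arc_length(90, 10), 5 * pi)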
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_VOCAB = get_tests_dir('''fixtures/vocab.json''')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('''fixtures''')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def lowerCamelCase__( self :List[Any] ) -> str:
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :List[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig()
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__lowercase ,os.path.join(__lowercase ,__lowercase ) )
copyfile(__lowercase ,os.path.join(__lowercase ,'vocab.json' ) )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :List[str] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__lowercase ,__lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(__lowercase ,__lowercase ) ,'r' ) as f:
a__ = json.load(__lowercase )
config_dict.pop('processor_class' )
with open(os.path.join(__lowercase ,__lowercase ) ,'w' ) as f:
f.write(json.dumps(__lowercase ) )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__lowercase ,__lowercase )
# save in new folder
processor.save_pretrained(__lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(__lowercase ,__lowercase ) ,'r' ) as f:
a__ = json.load(__lowercase )
config_dict.pop('processor_class' )
with open(os.path.join(__lowercase ,__lowercase ) ,'w' ) as f:
f.write(json.dumps(__lowercase ) )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :Dict ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__lowercase )
# copy relevant files
copyfile(__lowercase ,os.path.join(__lowercase ,'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(__lowercase ,__lowercase ) ,'w' ) as f:
f.write('{}' )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
def lowerCamelCase__( self :List[Any] ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase ):
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__lowercase )
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
a__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
a__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__lowercase ,use_fast=__lowercase )
a__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
def lowerCamelCase__( self :int ) -> Dict:
try:
AutoConfig.register('custom' ,__lowercase )
AutoFeatureExtractor.register(__lowercase ,__lowercase )
AutoTokenizer.register(__lowercase ,slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase ,__lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoProcessor.register(__lowercase ,__lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__lowercase ,'vocab.txt' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__lowercase )
a__ = CustomProcessor(__lowercase ,__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__lowercase )
a__ = AutoProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase ,__lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
try:
AutoConfig.register('custom' ,__lowercase )
AutoFeatureExtractor.register(__lowercase ,__lowercase )
AutoTokenizer.register(__lowercase ,slow_tokenizer_class=__lowercase )
AutoProcessor.register(__lowercase ,__lowercase )
# If remote code is not set, the default is to use local classes.
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__lowercase )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ ,'BertTokenizerFast' )
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ ,'ConvNextImageProcessor' )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token ,repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-processor' )
except HTTPError:
pass
def lowerCamelCase__( self :Dict ) -> str:
a__ = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase ,'test-processor' ) ,push_to_hub=__lowercase ,use_auth_token=self._token )
a__ = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase ,getattr(new_processor.feature_extractor ,__lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :Tuple ) -> str:
a__ = WavaVecaProcessor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__lowercase ,'test-processor-org' ) ,push_to_hub=__lowercase ,use_auth_token=self._token ,organization='valid_org' ,)
a__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__lowercase ,getattr(new_processor.feature_extractor ,__lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :List[Any] ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
a__ = CustomFeatureExtractor.from_pretrained(__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__lowercase ,'vocab.txt' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__lowercase )
a__ = CustomProcessor(__lowercase ,__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' ,token=self._token )
a__ = Repository(__lowercase ,clone_from=F'{USER}/test-dynamic-processor' ,token=self._token )
processor.save_pretrained(__lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__lowercase ,'tokenizer_config.json' ) ) as f:
a__ = json.load(__lowercase )
self.assertDictEqual(
tokenizer_config['auto_map'] ,{
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__lowercase ,'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase ,'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__lowercase ,'custom_processing.py' ) ) )
repo.push_to_hub()
a__ = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' ,trust_remote_code=__lowercase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,'CustomProcessor' )
| 715 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number (Project Euler 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
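# Illustrative check (not part of the original file): the first six primes
# are 2, 3, 5, 7, 11, 13.
def _demo_nth_prime() -> None:
    assert [solution(n) for n in range(1, 7)] == [2, 3, 5, 7, 11, 13]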
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 716 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 657 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count the tile totals t <= ``t_limit`` that can form at least one and at
    most ``n_limit`` distinct hollow square laminae (Project Euler 174)."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
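# Illustrative check (not part of the original file): a lamina with outer
# width w and hole width h uses w*w - h*h tiles, so an 8x8 square with a
# 4x4 hole uses 48 tiles.
def _demo_lamina_tiles() -> None:
    outer_width, hole_width = 8, 4
    assert outer_width * outer_width - hole_width * hole_width == 48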
if __name__ == "__main__":
print(f"""{solution() = }""")
| 717 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 657 | 0 |
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place with LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
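# Illustrative check (not part of the original file): each pass buckets by
# one digit (ones, then tens, ...) and is stable within a pass.
def _demo_radix_sort() -> None:
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]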
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
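# Illustrative check (not part of the original file): the longest
# non-decreasing subsequence of the sample input keeps every element that
# can extend the chain.
def _demo_longest_subsequence() -> None:
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]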
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case_ (unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
a__ = 1
a__ = 3
a__ = (32, 32)
a__ = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
    def dummy_cond_unet(self):
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
a__ = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_06 ,)
return RobertaSeriesModelWithTransformation(lowerCAmelCase_ )
@property
    def dummy_extractor(self):
def extract(*__snake_case :int ,**__snake_case :int ):
class snake_case_ :
def __init__( self :int ) -> Optional[int]:
a__ = torch.ones([0] )
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[int] ) -> Union[str, Any]:
self.pixel_values.to(lowerCAmelCase_ )
return self
return Out()
return extract
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ = self.dummy_cond_unet
a__ = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
a__ = self.dummy_vae
a__ = self.dummy_text_encoder
a__ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
a__ = 77
a__ = self.dummy_image.to(lowerCAmelCase_ )
a__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
a__ = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ ,scheduler=lowerCAmelCase_ ,vae=lowerCAmelCase_ ,text_encoder=lowerCAmelCase_ ,tokenizer=lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,feature_extractor=self.dummy_extractor ,)
a__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase_ )
a__ = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a__ = 'A painting of a squirrel eating a burger'
a__ = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
a__ = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,image=lowerCAmelCase_ ,)
a__ = output.images
a__ = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
a__ = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,image=lowerCAmelCase_ ,return_dict=lowerCAmelCase_ ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__ = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = self.dummy_cond_unet
a__ = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
a__ = self.dummy_vae
a__ = self.dummy_text_encoder
a__ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
a__ = 77
a__ = self.dummy_image.to(lowerCAmelCase_ )
# put models in fp16
a__ = unet.half()
a__ = vae.half()
a__ = bert.half()
# make sure here that pndm scheduler skips prk
a__ = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ ,scheduler=lowerCAmelCase_ ,vae=lowerCAmelCase_ ,text_encoder=lowerCAmelCase_ ,tokenizer=lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,feature_extractor=self.dummy_extractor ,)
a__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase_ )
a__ = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
a__ = 'A painting of a squirrel eating a burger'
a__ = torch.manual_seed(0 )
a__ = alt_pipe(
[prompt] ,generator=lowerCAmelCase_ ,num_inference_steps=2 ,output_type='np' ,image=lowerCAmelCase_ ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
a__ = init_image.resize((7_60, 5_04) )
a__ = 'BAAI/AltDiffusion'
a__ = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,)
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
a__ = 'A fantasy landscape, trending on artstation'
a__ = torch.manual_seed(0 )
a__ = pipe(
prompt=lowerCAmelCase_ ,image=lowerCAmelCase_ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase_ ,output_type='np' ,)
a__ = output.images[0]
a__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
a__ = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :str ) -> Tuple:
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
a__ = init_image.resize((7_68, 5_12) )
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
a__ = 'BAAI/AltDiffusion'
a__ = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ ,safety_checker=lowerCAmelCase_ ,)
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
a__ = 'A fantasy landscape, trending on artstation'
a__ = torch.manual_seed(0 )
a__ = pipe(
prompt=lowerCAmelCase_ ,image=lowerCAmelCase_ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase_ ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 719 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
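# Minimal usage sketch (illustrative, not part of the original file; the
# image path is an assumption). Images are resized down to the nearest
# multiple of `size_divisor` and rescaled to [0, 1]:
#
#   processor = GLPNImageProcessor(size_divisor=32)
#   batch = processor.preprocess(PIL.Image.open("example.jpg"), return_tensors="np")
#   batch["pixel_values"][0].shape  # channels-first, H and W divisible by 32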
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case : List[str] = logging.get_logger(__name__)
snake_case : str = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''nat'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
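# Illustrative check (not part of the original file): with the defaults,
# hidden_size = 64 * 2 ** (4 - 1) = 512 and there are four stages plus a stem.
#
#   config = NatConfig()
#   assert config.hidden_size == 512
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]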
| 720 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result
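# Illustrative check (not part of the original file): both implementations
# agree on the first five rows of Pascal's triangle.
def _demo_pascal_triangle() -> None:
    expected = [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
    assert generate_pascal_triangle(5) == expected
    assert generate_pascal_triangle_optimized(5) == expected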
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 0 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all (not necessarily contiguous) subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
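# Illustrative check (not part of the original file): the subsequence need
# not be contiguous, so [3, -10, 4] gives 3 + 4 = 7.
def _demo_max_subsequence_sum() -> None:
    assert max_subsequence_sum([3, -10, 4]) == 7
    assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10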
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
snake_case : str = int(input('''Enter number of elements : ''').strip())
snake_case : int = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 721 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 657 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
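# Minimal sketch (illustrative, not part of the original module) of a concrete
# subclass; the command name "hello" and its behaviour are assumptions. Note
# that `parser` is the argparse sub-parsers action, so `add_parser` is
# available on it.
class _ExampleHelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: _ExampleHelloCommand())

    def run(self):
        print("hello")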
| 700 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
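# Illustrative check (not part of the original file): h(n) = n * (2n - 1),
# so the first five hexagonal numbers are 0, 1, 6, 15, 28.
def _demo_hexagonal_numbers() -> None:
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]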
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Any = False
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = BitModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__( self :int ) -> List[str]:
return
@unittest.skip(reason='Bit does not output attentions' )
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def lowerCamelCase__( self :List[str] ) -> Dict:
pass
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__snake_case )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(config=__snake_case )
for name, module in model.named_modules():
                if isinstance(__snake_case ,(nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
def lowerCamelCase__( self :Optional[Any] ) -> List[Any]:
def check_hidden_states_output(__snake_case :int ,__snake_case :Optional[Any] ,__snake_case :Dict ):
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a__ = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) ,expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
a__ = layer_type
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
pass
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = BitModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Any ) -> Dict:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__( self :Tuple ) -> Dict:
a__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__snake_case )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__snake_case ,return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
a__ = model(**__snake_case )
# verify the logits
a__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,__snake_case )
a__ = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4 ) )
@require_torch
class snake_case_ (BackboneTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase__ : Any = BitConfig
UpperCAmelCase__ : Optional[int] = False
def lowerCamelCase__( self :Union[str, Any] ) -> Union[str, Any]:
a__ = BitModelTester(self )
| 701 |
def calc_profit(profit: list , weight: list , max_weight: int ):
    if len(profit ) != len(weight ):
        raise ValueError('The length of profit and weight must be same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.' )
    if any(p < 0 for p in profit ):
        raise ValueError('Profit can not be negative.' )
    if any(w < 0 for w in weight ):
        raise ValueError('Weight can not be negative.' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
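    # Worked example: with profits [1, 2, 3], weights [3, 4, 5] and
    # max_weight 15 every item fits, so the greedy gain is 1 + 2 + 3 = 6.
    assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6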
| 657 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'''
_DESCRIPTION = '''\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'''
_KWARGS_DESCRIPTION = '''\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=5_00 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=10_24 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=torch_device ) # Legal the president is
        expected_output_ids = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids ,do_sample=False )
        self.assertListEqual(output_ids[0].tolist() ,expected_output_ids )
| 657 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class snake_case_ (OwlViTImageProcessor ):
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 704 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (SchedulerMixin , ConfigMixin ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
    def __init__( self ,num_train_timesteps: int = 10_00 ,trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self ,num_inference_steps: int ,device: Union[str, torch.device] = None ) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self ,model_output: torch.FloatTensor ,timestep: int ,sample: torch.FloatTensor ,return_dict: bool = True ,) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self ,sample: torch.FloatTensor ,*args ,**kwargs ) -> torch.FloatTensor:
        return sample
    def _get_prev_sample( self ,sample ,timestep_index ,prev_timestep_index ,ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha ,1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
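# Minimal usage sketch (this class mirrors the IPNDM multistep scheduler; the
# local class name is used directly):
#   scheduler = snake_case_(num_train_timesteps=50)
#   scheduler.set_timesteps(10)
#   scheduler.timesteps.shape  # -> torch.Size([10])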
| 657 | 0 |
def equation(x: float ):
    return 1_0 - x * x
def bisection(a: float , b: float ):
    # Bolzano theorem in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
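    # Both calls bracket the positive root of 10 - x**2, so each result
    # should land within the 0.01 stopping tolerance of sqrt(10) ~= 3.1623.
    assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01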
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution(n: int = 1_0_0_0 ) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
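    # Quick check: 144 = F(12) is the first Fibonacci number with three
    # digits, so solution(3) should return 12.
    assert solution(3) == 12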
| 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys(name ):
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
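# Worked example (the input key is an assumed fairseq-style name, shown for
# illustration only):
#   rename_keys('transformer.layers.0.linear1.weight')
#   -> 'model.decoder.layers.0.fc1.weight'
# via 'transformer' -> 'model.decoder' and 'linear1' -> 'fc1'.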
def rename_state_dict(state_dict , hidden_size ):
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint ):
    if checkpoint == "small":
        # default config values
        hidden_size = 1_0_2_4
        num_hidden_layers = 2_4
        num_attention_heads = 1_6
    elif checkpoint == "medium":
        hidden_size = 1_5_3_6
        num_hidden_layers = 4_8
        num_attention_heads = 2_4
    elif checkpoint == "large":
        hidden_size = 2_0_4_8
        num_hidden_layers = 4_8
        num_attention_heads = 3_2
    else:
        raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' )
    if len(unexpected_keys ) > 0:
        raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_0_4_8):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_0_4_8
    model.generation_config.pad_token_id = 2_0_4_8
    # set other default generation config params
    model.generation_config.max_length = int(3_0 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F'Pushing model {checkpoint} to {repo_id}' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
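# Example invocation (the script name and output path are illustrative):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small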
| 707 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ):
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462 # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ):
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ):
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
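    # Worked example: one mole of an ideal gas at 273.15 K in 0.0224 m^3
    # gives roughly standard atmospheric pressure (~101 kPa).
    print(pressure_of_gas_system(1, 273.15, 0.0224))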
| 708 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute( self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ):
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 657 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
debug_launcher(test_script.main )
def lowerCamelCase__( self :int ) -> Dict:
debug_launcher(test_ops.main )
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
    '''linear''': get_linear_schedule_with_warmup,
    '''cosine''': get_cosine_schedule_with_warmup,
    '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
    '''polynomial''': get_polynomial_decay_schedule_with_warmup,
    '''constant''': get_constant_schedule,
    '''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (Trainer ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
                'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
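    # --- Editor's illustrative sketch (not part of the original trainer) ---
    # _pad_tensors_to_max_len right-pads a (batch, seq) tensor with pad_token_id
    # so shorter generations can be stacked with full-length ones, e.g.:
    #
    #   pad_id = 0
    #   t = torch.tensor([[1, 2, 3], [4, 5, 6]])
    #   padded = pad_id * torch.ones((t.shape[0], 5), dtype=t.dtype)
    #   padded[:, : t.shape[-1]] = t   # -> [[1, 2, 3, 0, 0], [4, 5, 6, 0, 0]]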
| 657 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id=None, agent_name=None, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
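# --- Editor's illustrative sketch (not part of the original file) ---
# Anything containing whitespace is treated as the prompt itself; anything else
# is treated as a Hub dataset repo ID (the agent name below is hypothetical):
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")   # default repo
#   inline = download_prompt("Answer <<task>> briefly.", agent_name="my-agent")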
| 657 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """The Chudnovsky algorithm computes pi to `precision` decimal digits."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 657 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        do_normalize: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
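# --- Editor's illustrative sketch (not part of the original file) ---
# preprocess applies resize -> center_crop -> rescale -> normalize; rescaling by
# 1/255 maps uint8 pixels into [0, 1] before per-channel normalization:
#
#   normalized = (pixel / 255 - mean) / std   # with IMAGENET_DEFAULT_MEAN/STD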
| 712 |
def solution(pence: int = 200) -> int:
    """Returns the number of different ways to make `pence` pence using any
    number of coins of value 1, 2, 5, 10, 20, 50, 100 and 200 pence."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
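    # Editor's addition (illustrative): hand-checkable small cases; 2p can be
    # made 2 ways (1+1, 2) and 5p can be made 4 ways.
    assert solution(2) == 2
    assert solution(5) == 4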
| 657 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
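    # Editor's note (illustrative invocation; the paths are hypothetical):
    #   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./bigbird/model.ckpt \
    #       --big_bird_config_file ./bigbird/config.json \
    #       --pytorch_dump_path ./bigbird-pytorch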
| 713 |
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])
        self.play(Write(step_6))

        input = Square(0.3)
        input.set_fill(ORANGE, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_6))

        a_1 = Arrow(start=UP, end=DOWN, color=ORANGE, buff=0.5)
        a_1.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_7 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])
        self.play(Write(step_7, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a_1),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a_1.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a_1, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a_1 = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_7), FadeOut(a_c, run_time=0.5),
        )

        step_8 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_8.move_to([2, 2, 0])
        self.play(Write(step_8, run_time=3), MoveToTarget(input))
        self.wait()
| 657 | 0 |
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
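    # Editor's addition (illustrative): the attention lookups fold the head axis
    # into the feature axis before transposing, i.e. a (d_model, n_heads, d_head)
    # kernel becomes (d_model, n_heads * d_head):
    demo = np.zeros((4, 2, 3))
    assert demo.reshape(demo.shape[0], demo.shape[1] * demo.shape[2]).shape == (4, 6)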
| 714 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
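    # Editor's addition (illustrative): a 90 degree arc of a radius-10 circle is
    # a quarter circumference, 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12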
| 657 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 715 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
def reverse_long_words(sentence: str) -> str:
    """Reverse all words that are longer than 4 characters in a sentence."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 716 |
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # max_weight = -15
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # weight = [2, -4, 6, 8, 10, 12]
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # profit = [10, -20, 30, 40, 50, 60]
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # len(profit) != len(weight)
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 657 | 0 |
def add(first: int, second: int) -> int:
    """Implementation of addition of integers with bitwise operators."""
    while second != 0:
        # c contains the common set bits of first and second (the carry)
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 717 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """See https://github.com/huggingface/transformers/issues/21689"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
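# --- Editor's note (not part of the original file) ---
# Wrapping each lr_lambda in a picklable callable object is what lets the
# mid-run state-dict round-trip in unwrap_and_save_reload_schedule succeed:
#
#   LambdaScheduleWrapper.wrap_scheduler(scheduler)
#   torch.save(scheduler.state_dict(), "schedule.bin")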
| 657 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 718 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
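    # Editor's addition (illustrative): returns the longest non-decreasing
    # subsequence, e.g. for the classic test input:
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]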
| 657 | 0 |
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
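    # Editor's addition (illustrative): decrypt inverts encrypt for any valid key.
    assert decrypt(encrypt("WE ARE DISCOVERED.", 3), 3) == "WE ARE DISCOVERED."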
| 719 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
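# --- Editor's illustrative sketch (not part of the original file) ---
# The resize step floors each side to a multiple of size_divisor, e.g. with
# size_divisor=32 an image of height 513 and width 1000 becomes 512 x 992:
#
#   new_h = 513 // 32 * 32    # 512
#   new_w = 1000 // 32 * 32   # 992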
| 657 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = ['''pixel_values''']
def __init__( self :str ,__snake_case :int = True ,__snake_case :Optional[int] = 32 ,__snake_case :int=PILImageResampling.BILINEAR ,__snake_case :Optional[Any] = True ,**__snake_case :int ,) -> Any:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**A__ )
def lowerCamelCase__( self :int ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :int = None ,**__snake_case :int ) -> Optional[int]:
a__ = get_image_size(A__ )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(A__ ,(new_h, new_w) ,resample=A__ ,data_format=A__ ,**A__ )
return image
def lowerCamelCase__( self :Optional[int] ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :List[str] = None ,**__snake_case :int ) -> Optional[int]:
return rescale(image=A__ ,scale=A__ ,data_format=A__ ,**A__ )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Tuple ,__snake_case :Optional[Any] = None ,__snake_case :str = None ,__snake_case :Optional[Any]=None ,__snake_case :str = None ,__snake_case :List[str] = None ,__snake_case :Optional[Any] = ChannelDimension.FIRST ,**__snake_case :Optional[int] ,) -> Tuple:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(A__ ) for img in images]
if do_resize:
a__ = [self.resize(A__ ,size_divisor=A__ ,resample=A__ ) for image in images]
if do_rescale:
a__ = [self.rescale(A__ ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(A__ ,A__ ) for image in images]
a__ = {"""pixel_values""": images}
return BatchFeature(data=A__ ,tensor_type=A__ )
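# Quick numeric check of the 1 / 255 rescale step used in preprocess() above,
# assuming numpy is available: 8-bit pixel values are mapped into [0.0, 1.0].
import numpy as np

_pixels = np.array([0, 127, 255], dtype=np.float32)
_scaled = _pixels * (1 / 255)
assert _scaled.min() == 0.0
assert np.isclose(_scaled.max(), 1.0)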
| 720 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
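# Independent cross-check of the triangle rows produced above, using the
# closed form C(n, k) from the standard library (row 4 of Pascal's triangle):
from math import comb

_row_idx = 4
assert [comb(_row_idx, k) for k in range(_row_idx + 1)] == [1, 4, 6, 4, 1]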
| 657 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
snake_case : Optional[int] = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , 'sklearn' )
return (preds == labels).mean()
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , 'sklearn' )
a__ = simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )
a__ = fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , 'sklearn' )
a__ = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
a__ = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , 'sklearn' )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ), F'Predictions and labels have mismatched lengths {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "qqp":
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(lowerCAmelCase__ )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ):
warnings.warn(lowerCAmelCase__ , lowerCAmelCase__ )
requires_backends(lowerCAmelCase__ , 'sklearn' )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(lowerCAmelCase__ )
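# Minimal numeric example of the simple-accuracy metric defined above,
# assuming numpy arrays of equal length (values are hypothetical):
import numpy as np

_preds = np.array([1, 0, 1, 1])
_labels = np.array([1, 0, 0, 1])
assert (_preds == _labels).mean() == 0.75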
| 721 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
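# Cross-check on the hand-type score table above: scores are ordered so that
# a royal flush (23) outranks four of a kind (21), which outranks a full
# house (20).
_scores = {"JH AH TH KH QH": 23, "JC KH JS JD JH": 21, "KH KC 3S 3H 3D": 20}
assert _scores["JH AH TH KH QH"] > _scores["JC KH JS JD JH"] > _scores["KH KC 3S 3H 3D"]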
| 657 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class snake_case_ :
UpperCAmelCase__ : int = None
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = self.feature_extraction_class(**self.feat_extract_dict )
a__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] ,UpperCamelCase__ )
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(UpperCamelCase__ ,'feat_extract.json' )
feat_extract_first.to_json_file(UpperCamelCase__ )
a__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
a__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def lowerCamelCase__( self :int ) -> Any:
a__ = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
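# The serialization round-trip these tests rely on, in sketch form (class and
# attribute names here are hypothetical):
# fe = SomeFeatureExtractor(sampling_rate=16_000)
# fe.to_json_file("feat_extract.json")
# assert SomeFeatureExtractor.from_json_file("feat_extract.json").to_dict() == fe.to_dict()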
| 700 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(length , int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
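# Expected output of the two calls above: the n-th hexagonal number is
# n * (2n - 1), so length=5 yields [0, 1, 6, 15, 28].
assert [n * (2 * n - 1) for n in range(5)] == [0, 1, 6, 15, 28]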
| 657 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __lowercase ( __lowerCAmelCase : Any ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def __lowercase ( __lowerCAmelCase : List[str] ):
# word like '180' or '身高' or '神'
for char in word:
a__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def __lowercase ( __lowerCAmelCase : List[str] ):
a__ = set()
for token in tokens:
a__ = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(token )
a__ = list(word_set )
return word_list
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
if not chinese_word_set:
return bert_tokens
a__ = max([len(w ) for w in chinese_word_set] )
a__ = bert_tokens
a__ , a__ = 0, len(__lowerCAmelCase )
while start < end:
a__ = True
if is_chinese(bert_word[start] ):
a__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
a__ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a__ = '##' + bert_word[j]
a__ = start + i
a__ = False
break
if single_word:
start += 1
return bert_word
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
a__ = []
for i in range(0 , len(lines ) , 1_0_0 ):
a__ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(ltp_res ) == len(lines )
a__ = []
for i in range(0 , len(lines ) , 1_0_0 ):
a__ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(bert_res ) == len(lines )
a__ = []
for input_ids, chinese_word in zip(bert_res , ltp_res ):
a__ = []
for id in input_ids:
a__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
a__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
a__ = []
# We only save positions of chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
a__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(ref_ids ) == len(bert_res )
return ref_ids
def __lowercase ( __lowerCAmelCase : List[Any] ):
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a__ = f.readlines()
a__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a__ = LTP(args.ltp ) # faster in GPU device
a__ = BertTokenizer.from_pretrained(args.bert )
a__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a__ = [json.dumps(__lowerCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Dict = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
snake_case : Dict = parser.parse_args()
main(args)
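# Illustrative behavior of add_sub_symbol above (tokens are hypothetical):
# given BERT tokens ['天', '气', '好'] and an LTP whole-word set {'天气'},
# every character of a matched whole word except the first is prefixed with
# '##', yielding ['天', '##气', '好'], the reference format used for whole
# word masking.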
| 701 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(profit ) != len(weight ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(profit , weight )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(profit_by_weight )
# declaring useful variables
a__ = len(profit_by_weight )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(sorted_profit_by_weight[length - i - 1] )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding the full profit for this item, since
# weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
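# Worked example for calc_profit (inputs are hypothetical): with profits
# [10, 9, 8], weights [3, 4, 5] and max_weight 5, the greedy choice takes all
# of item 0 (ratio 10/3) plus 2 of item 1's 4 kg, so the expected gain is
# 10 + 9 * (2 / 4) = 14.5.
assert 10 + 9 * (2 / 4) == 14.5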
| 657 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case : int = logging.getLogger()
def __lowercase ( __lowerCAmelCase : Path , __lowerCAmelCase : list ):
a__ = "\n".join(SCREAMING_SNAKE_CASE_ )
Path(SCREAMING_SNAKE_CASE_ ).open('w' ).writelines(SCREAMING_SNAKE_CASE_ )
snake_case : int = '''patrickvonplaten/t5-tiny-random'''
snake_case : str = '''sshleifer/bart-tiny-random'''
snake_case : Optional[int] = '''sshleifer/tiny-mbart'''
snake_case : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case_ (_UpperCAmelCase ):
def lowerCamelCase__( self :Tuple ,__snake_case :int ) -> Dict:
a__ = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
a__ = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
a__ = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowercase__ ,lowercase__ )
a__ = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
a__ = "translation_en_to_de" if model == T5_TINY else "summarization"
a__ = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(lowercase__ ,'argv' ,lowercase__ ):
run_generate()
assert Path(lowercase__ ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
self.run_eval_tester(lowercase__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ) -> Dict:
self.run_eval_tester(lowercase__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any] ) -> int:
a__ = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
a__ = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
a__ = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
a__ = Path(self.get_auto_remove_tmp_dir() )
a__ = str(tmp_dir / 'scores.json' )
a__ = str(tmp_dir / 'val.target' )
_dump_articles(lowercase__ ,text['en'] )
_dump_articles(lowercase__ ,text['de'] )
a__ = "translation_en_to_de" if model == T5_TINY else "summarization"
a__ = F'\n run_eval_search.py\n {model}\n {str(lowercase__ )}\n {str(lowercase__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(lowercase__ ,'argv' ,lowercase__ ):
with CaptureStdout() as cs:
run_search()
a__ = [" num_beams | length_penalty", model, "Best score args"]
a__ = ["Info"]
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(lowercase__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowercase__ ).exists()
os.remove(Path(lowercase__ ) )
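# Sketch of the search-flag semantics exercised above (not the parser itself):
# "num_beams=1:2 length_penalty=0.9:1.0" expands to the cartesian product of
# the listed values, i.e. 2 x 2 = 4 evaluation runs.
assert len([(b, p) for b in (1, 2) for p in (0.9, 1.0)]) == 4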
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
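# Effect of the lazy module set up above (illustrative): the submodules listed
# in _import_structure are only imported on first attribute access, so simply
# importing the package stays cheap until e.g. FocalNetConfig is actually used.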
| 657 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case : Union[str, Any] = Lock()
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(SCREAMING_SNAKE_CASE_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
a__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
a__ = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(SCREAMING_SNAKE_CASE_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
a__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
a__ = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(SCREAMING_SNAKE_CASE_ )
def __lowercase ( __lowerCAmelCase : List[Any] ):
a__ = []
a__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
a__ = Pipe()
a__ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
a__ = temp_rs
a__ = temp_rr
for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
a__ = Pipe()
a__ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
a__ = temp_rs
a__ = temp_rr
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE_ , args=(
len(SCREAMING_SNAKE_CASE_ ) - 1,
arr[len(SCREAMING_SNAKE_CASE_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(SCREAMING_SNAKE_CASE_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
a__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowercase ( ):
a__ = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*SCREAMING_SNAKE_CASE_ )
a__ = odd_even_transposition(SCREAMING_SNAKE_CASE_ )
print('Sorted List\n' )
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
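# Single-process reference of odd-even transposition sort for comparison with
# the multiprocessing version above (a minimal sketch; n phases are guaranteed
# to sort an n-element list):
def _odd_even_reference(values: list) -> list:
    values = list(values)
    n = len(values)
    for phase in range(n):
        # Even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
    return values

assert _odd_even_reference(list(range(10, 0, -1))) == list(range(1, 11))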
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
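# Note on the check above: assuming do_sample is disabled (greedy decoding),
# generate() is deterministic, so the 20-token continuation of "Legal the
# president is" can be compared token-for-token with assertListEqual.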
| 657 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
def __init__( self :int ,__snake_case :str ,__snake_case :int=13 ,__snake_case :str=7 ,__snake_case :Any=True ,__snake_case :str=True ,__snake_case :Dict=True ,__snake_case :int=True ,__snake_case :Dict=True ,__snake_case :Optional[Any]=False ,__snake_case :Optional[Any]=False ,__snake_case :Optional[int]=False ,__snake_case :Dict=2 ,__snake_case :Any=99 ,__snake_case :Optional[Any]=0 ,__snake_case :Tuple=32 ,__snake_case :Optional[Any]=5 ,__snake_case :Dict=4 ,__snake_case :int=0.1 ,__snake_case :Dict=0.1 ,__snake_case :int=5_12 ,__snake_case :int=2 ,__snake_case :Optional[int]=0.02 ,__snake_case :Optional[int]=2 ,__snake_case :Any=4 ,__snake_case :Tuple="last" ,__snake_case :Optional[Any]=True ,__snake_case :str=None ,__snake_case :List[Any]=0 ,) -> Optional[Any]:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_lengths
a__ = use_token_type_ids
a__ = use_labels
a__ = gelu_activation
a__ = sinusoidal_embeddings
a__ = causal
a__ = asm
a__ = n_langs
a__ = vocab_size
a__ = n_special
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = summary_type
a__ = use_proj
a__ = scope
a__ = bos_token_id
def lowerCamelCase__( self :str ) -> List[Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_input_lengths:
a__ = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,2 ).float()
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :List[str] ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :List[str] ,__snake_case :Optional[Any] ,) -> str:
a__ = XLMModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,lengths=__snake_case ,langs=__snake_case )
a__ = model(__snake_case ,langs=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Dict ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :List[str] ,__snake_case :Tuple ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :Tuple ,__snake_case :str ,) -> str:
a__ = XLMWithLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :Any ,__snake_case :str ,__snake_case :Tuple ,__snake_case :Union[str, Any] ,__snake_case :List[str] ,__snake_case :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :List[str] ,) -> List[str]:
a__ = XLMForQuestionAnsweringSimple(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
a__ = model(__snake_case ,start_positions=__snake_case ,end_positions=__snake_case )
a__ = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__( self :Dict ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Tuple ,__snake_case :Dict ,__snake_case :Tuple ,__snake_case :Any ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,) -> Union[str, Any]:
a__ = XLMForQuestionAnswering(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
a__ = model(
__snake_case ,start_positions=__snake_case ,end_positions=__snake_case ,cls_index=__snake_case ,is_impossible=__snake_case ,p_mask=__snake_case ,)
a__ = model(
__snake_case ,start_positions=__snake_case ,end_positions=__snake_case ,cls_index=__snake_case ,is_impossible=__snake_case ,)
(a__ ,) = result_with_labels.to_tuple()
a__ = model(__snake_case ,start_positions=__snake_case ,end_positions=__snake_case )
(a__ ,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :str ,__snake_case :int ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,) -> Union[str, Any]:
a__ = XLMForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
a__ = model(__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__( self :List[Any] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :Dict ,__snake_case :Any ,__snake_case :List[Any] ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :str ,__snake_case :Optional[Any] ,) -> Tuple:
a__ = self.num_labels
a__ = XLMForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,attention_mask=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__( self :Any ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,) -> List[Any]:
a__ = self.num_choices
a__ = XLMForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = model(
__snake_case ,attention_mask=__snake_case ,token_type_ids=__snake_case ,labels=__snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCamelCase__( self :Optional[int] ) -> Optional[int]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class snake_case_ (__A , __A , __A , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Any ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :int ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :List[str]=False ) -> List[Any]:
a__ = super()._prepare_for_class(__snake_case ,__snake_case ,return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
return inputs_dict
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
a__ = XLMModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,emb_dim=37 )
def lowerCamelCase__( self :List[Any] ) -> List[str]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__snake_case )
def lowerCamelCase__( self :int ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__snake_case )
def lowerCamelCase__( self :Any ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__snake_case )
def lowerCamelCase__( self :Dict ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :Any ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any]=False ,__snake_case :List[str]=1 ) -> Union[str, Any]:
self.assertIsInstance(__snake_case ,__snake_case )
self.assertListEqual(
[isinstance(__snake_case ,__snake_case ) for iter_attentions in attentions] ,[True] * len(__snake_case ) )
self.assertEqual(len(__snake_case ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__snake_case ):
# adds PAD dummy token
a__ = min_length + idx + 1
a__ = min_length + idx + 1
a__ = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(__snake_case ) )
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[int] ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :Union[str, Any] ,__snake_case :Dict=False ,__snake_case :int=1 ) -> Any:
self.assertIsInstance(__snake_case ,__snake_case )
self.assertListEqual(
[isinstance(__snake_case ,__snake_case ) for iter_hidden_states in hidden_states] ,[True] * len(__snake_case ) ,)
self.assertEqual(len(__snake_case ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__snake_case ):
# adds PAD dummy token
a__ = min_length + idx + 1
a__ = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(__snake_case ) ,)
pass
@slow
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = XLMModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :Tuple ) -> Tuple:
a__ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(__snake_case )
a__ = torch.tensor([[14, 4_47]] ,dtype=torch.long ,device=__snake_case ) # the president
a__ = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,__snake_case )
| 704 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin ,ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self ,num_train_timesteps: int = 10_00 ,trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps( self ,num_inference_steps: int ,device: Union[str, torch.device] = None ) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )

        self.ets = []

    def step( self ,model_output: torch.FloatTensor ,timestep: int ,sample: torch.FloatTensor ,return_dict: bool = True ,) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )

        # Linear multistep (Adams-Bashforth) update; the 4th-order branch is the
        # F-PNDM formula from the paper referenced above.
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )

    def scale_model_input( self ,sample: torch.FloatTensor ,*args ,**kwargs ) -> torch.FloatTensor:
        return sample

    def _get_prev_sample( self ,sample ,timestep_index ,prev_timestep_index ,ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha ,1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
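

# --- Usage sketch (added for illustration; not part of the original module).
# The random tensor stands in for a real denoising network's noise prediction,
# and the 50-step setting is an arbitrary choice.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=10_00 )
    scheduler.set_timesteps(50 )
    sample = torch.randn(1 ,3 ,32 ,32 )
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # stand-in for a model's predicted noise
        sample = scheduler.step(model_output ,t ,sample ).prev_sample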
| 657 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Dict = '''▁'''
snake_case : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
snake_case : Union[str, Any] = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
snake_case : Optional[Any] = {
'''facebook/mbart-large-50-one-to-many-mmt''': 10_24,
}
# fmt: off
snake_case : str = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class snake_case_ (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self :Tuple ,__snake_case :str ,__snake_case :Any=None ,__snake_case :List[Any]=None ,__snake_case :List[str]="</s>" ,__snake_case :Optional[int]="</s>" ,__snake_case :List[Any]="<s>" ,__snake_case :int="<unk>" ,__snake_case :List[Any]="<pad>" ,__snake_case :List[str]="<mask>" ,__snake_case :Optional[Dict[str, Any]] = None ,**__snake_case :List[Any] ,) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
a__ = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_A ,tgt_lang=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
a__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ = 1
a__ = len(self.sp_model )
a__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
a__ = {v: k for k, v in self.lang_code_to_id.items()}
a__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ = src_lang if src_lang is not None else 'en_XX'
a__ = self.lang_code_to_id[self._src_lang]
a__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang( self ) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang( self ,new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self :List[str] ) -> int:
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self :List[Any] ,__snake_case :Dict ) -> Optional[int]:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__( self :Any ) -> str:
a__ = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__( self :str ,__snake_case :str ) -> Tuple:
return self.sp_model.encode(_A ,out_type=_A )
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__( self :int ,__snake_case :int ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__( self :List[Any] ,__snake_case :str ) -> Tuple:
a__ = []
a__ = ''
a__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
a__ = True
a__ = []
else:
current_sub_tokens.append(_A )
a__ = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Any:
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self ,raw_inputs ,return_tensors: str ,src_lang: Optional[str] ,tgt_lang: Optional[str] ,**extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self ,src_texts: List[str] ,src_lang: str = "en_XX" ,tgt_texts: Optional[List[str]] = None ,tgt_lang: str = "ro_RO" ,**kwargs ,) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )

    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self ,src_lang: str ) -> None:
a__ = self.lang_code_to_id[src_lang]
a__ = [self.cur_lang_code_id]
a__ = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self ,tgt_lang: str ) -> None:
a__ = self.lang_code_to_id[tgt_lang]
a__ = [self.cur_lang_code_id]
a__ = [self.eos_token_id]
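

# --- Usage sketch (added for illustration; assumes the upstream `MBart50Tokenizer`
# API in `transformers` that this class mirrors, since loading it requires the
# pretrained sentencepiece model). ---
#
#     from transformers import MBart50Tokenizer
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         'facebook/mbart-large-50-one-to-many-mmt' ,src_lang='en_XX' ,tgt_lang='ro_RO' )
#     inputs = tokenizer('UN Chief Says There Is No Military Solution in Syria' ,return_tensors='pt' )
#     # `input_ids` start with the source language code and end with `</s>`,
#     # matching `set_src_lang_special_tokens` above.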
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
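# --- Note (added for illustration): `_LazyModule` defers the heavy framework
# imports above until an attribute is first accessed, so e.g. (assuming the
# usual top-level package layout):
#
#     from transformers import MobileViTModel  # the torch branch is only imported here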
| 657 | 0 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' ,'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' ,'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention' ,src_files ,with_cuda=True ,extra_include_paths=[str(root )] ,extra_cflags=['-DWITH_CUDA=1'] ,extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ] ,)
import MultiScaleDeformableAttention as MSDA
return MSDA
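

# --- Usage sketch (added for illustration): the JIT build needs a CUDA
# toolchain at runtime, so callers typically guard it. ---
#
#     try:
#         MSDA = load_cuda_kernels()
#     except Exception:
#         MSDA = None  # fall back to a pure-PyTorch implementation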
| 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name ,checkpoint_name ,dump_path ,force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers ,tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint ,force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory ,checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path ,checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full ,checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full ,legacy_format=False ,filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
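
# --- Example invocation (added for illustration; the script file name and the
# tokenizer/checkpoint names are assumptions) ---
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer \
#         --checkpoint_name bert-base-uncased \
#         --dump_path ./fast_tokenizers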
| 657 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
snake_case = True
except ImportError:
snake_case = False
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace ):
    return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path )

class AddNewModelCommand(BaseTransformersCLICommand ):
@staticmethod
def lowerCamelCase__( __snake_case :Optional[Any] ) -> Optional[int]:
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' ,type=str ,help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' ,type=str ,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
def __init__( self :str ,__snake_case :List[Any] ,__snake_case :str ,__snake_case :List[Any]=None ,*__snake_case :Any ) -> Optional[Any]:
a__ = testing
a__ = testing_file
a__ = path
def lowerCamelCase__( self :List[str] ) -> List[str]:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
a__ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(a_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
a__ = (
Path(a_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
a__ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(a_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
a__ = json.load(a_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=a_ ,extra_context=a_ ,)
a__ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
a__ = json.load(a_ )
a__ = configuration["lowercase_modelname"]
a__ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F'{directory}/configuration.json' )
a__ = "PyTorch" in generate_tensorflow_pytorch_and_flax
a__ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
a__ = "Flax" in generate_tensorflow_pytorch_and_flax
a__ = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(a_ ,exist_ok=a_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=a_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(__snake_case :str ):
with open(a_ ,'r' ) as f:
a__ = f.readlines()
with open(a_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(a_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__snake_case :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ):
# Create temp file
a__ = mkstemp()
a__ = False
with fdopen(a_ ,'w' ) as new_file:
with open(a_ ) as old_file:
for line in old_file:
new_file.write(a_ )
if line_to_copy_below in line:
a__ = True
for line_to_copy in lines_to_copy:
new_file.write(a_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(a_ ,a_ )
# Remove original file
remove(a_ )
# Move new file
move(a_ ,a_ )
def skip_units(__snake_case :Optional[Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__snake_case :int ):
with open(a_ ) as datafile:
a__ = []
a__ = False
a__ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
a__ = line.split('\"' )[1]
a__ = skip_units(a_ )
elif "# Below: " in line and "##" not in line:
a__ = line.split('\"' )[1]
a__ = skip_units(a_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(a_ ,a_ ,a_ )
a__ = []
elif "# Replace with" in line and "##" not in line:
a__ = []
elif "##" not in line:
lines_to_copy.append(a_ )
remove(a_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(a_ )
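

# --- Example invocation (added for illustration) ---
#
#     transformers-cli add-new-model --testing --testing_file config.json
#
# Without `--testing`, the command walks through the cookiecutter prompts
# interactively and then scaffolds the model, test, and documentation files
# moved into place above.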
| 707 |
from math import ceil, sqrt
def solution(limit: int = 1_0_0_0_0_0_0 ) -> int:
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
def __init__( self :int ) -> Dict:
# test for the above condition
self.test()
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ = 0
a__ = False
while not completed:
if counter == 1:
self.reset()
a__ = self.advance()
if not self.does_advance(__snake_case ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
a__ , a__ , a__ = self.update(__snake_case )
counter += 1
if counter > 1_00_00:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def lowerCamelCase__( self :List[Any] ) -> Tuple:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[str] ) -> int:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :List[str] ) -> str:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Any ) -> Dict:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :List[str] ,__snake_case :Tuple=False ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class PhrasalConstraint(Constraint ):
def __init__( self :int ,__snake_case :Union[str, Any] ) -> Any:
        super(PhrasalConstraint ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
a__ = token_ids
a__ = len(self.token_ids )
a__ = -1 # the index of the currently fulfilled step
a__ = False
def lowerCamelCase__( self :int ) -> Union[str, Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ) -> int:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :List[Any] ,__snake_case :str ) -> Any:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.fulfilled_idx += 1
a__ = True
if self.fulfilled_idx == (self.seqlen - 1):
a__ = True
a__ = completed
else:
# failed to make progress.
a__ = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__( self :int ) -> Union[str, Any]:
a__ = False
a__ = 0
def lowerCamelCase__( self :List[str] ) -> Union[str, Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[str]=False ) -> List[str]:
a__ = PhrasalConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.fulfilled_idx
a__ = self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self :int ,__snake_case :Optional[Any] ,__snake_case :List[Any]=True ) -> List[Any]:
a__ = max([len(__snake_case ) for one in nested_token_ids] )
a__ = {}
for token_ids in nested_token_ids:
a__ = root
for tidx, token_id in enumerate(__snake_case ):
if token_id not in level:
a__ = {}
a__ = level[token_id]
if no_subsets and self.has_subsets(__snake_case ,__snake_case ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F' {nested_token_ids}.' )
a__ = root
def lowerCamelCase__( self :Tuple ,__snake_case :List[Any] ) -> Optional[int]:
a__ = self.trie
for current_token in current_seq:
a__ = start[current_token]
a__ = list(start.keys() )
return next_tokens
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ) -> Optional[int]:
a__ = self.next_tokens(__snake_case )
return len(__snake_case ) == 0
def lowerCamelCase__( self :List[str] ,__snake_case :Any ) -> Union[str, Any]:
a__ = list(root.values() )
if len(__snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(__snake_case ) for nn in next_nodes] )
def lowerCamelCase__( self :Tuple ,__snake_case :List[Any] ,__snake_case :str ) -> str:
a__ = self.count_leaves(__snake_case )
return len(__snake_case ) != leaf_count
class DisjunctiveConstraint(Constraint ):
def __init__( self :List[str] ,__snake_case :Optional[Any] ) -> List[str]:
        super(DisjunctiveConstraint ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(__snake_case ,__snake_case ) for token_ids in nested_token_ids ):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
a__ = DisjunctiveTrie(__snake_case )
a__ = nested_token_ids
a__ = self.trie.max_height
a__ = []
a__ = False
def lowerCamelCase__( self :Dict ) -> List[Any]:
a__ = self.trie.next_tokens(self.current_seq )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :str ,__snake_case :Dict ) -> int:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__( self :Any ,__snake_case :Optional[int] ) -> str:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.current_seq.append(__snake_case )
a__ = True
else:
a__ = True
self.reset()
a__ = self.trie.reached_leaf(self.current_seq )
a__ = completed
return stepped, completed, reset
def lowerCamelCase__( self :Dict ) -> int:
a__ = False
a__ = []
def lowerCamelCase__( self :Union[str, Any] ) -> Union[str, Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__( self :Any ,__snake_case :int=False ) -> List[Any]:
a__ = DisjunctiveConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.current_seq
a__ = self.completed
return new_constraint
class ConstraintListState:
def __init__( self :Tuple ,__snake_case :int ) -> Optional[Any]:
a__ = constraints
# max # of steps required to fulfill a given constraint
a__ = max([c.seqlen for c in constraints] )
a__ = len(__snake_case )
a__ = False
self.init_state()
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = []
a__ = None
a__ = [constraint.copy(stateful=__snake_case ) for constraint in self.constraints]
def lowerCamelCase__( self :str ) -> Any:
a__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
a__ = constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
else:
a__ = self.inprogress_constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :Any ,__snake_case :Any ) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
a__ , a__ = self.add(__snake_case )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase__( self :Any ,__snake_case :int ) -> str:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
a__ , a__ = False, False
if self.completed:
a__ = True
a__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
a__ , a__ , a__ = self.inprogress_constraint.update(__snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__snake_case ) )
a__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
a__ = None
if len(self.pending_constraints ) == 0:
# we're done!
a__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__snake_case ):
a__ , a__ , a__ = pending_constraint.update(__snake_case )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(__snake_case )
a__ = None
if not complete and stepped:
a__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
a__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
a__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__( self :str ,__snake_case :List[str]=True ) -> Union[str, Any]:
a__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
a__ = [
constraint.copy(stateful=__snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
a__ = self.inprogress_constraint.copy(stateful=__snake_case )
a__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
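

# --- Usage sketch (added for illustration; assumes the upstream `transformers`
# constrained beam search API that these classes mirror). ---
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#
#     tokenizer = AutoTokenizer.from_pretrained('t5-small' )
#     model = AutoModelForSeq2SeqLM.from_pretrained('t5-small' )
#     force_ids = tokenizer('rainy day' ,add_special_tokens=False ).input_ids
#     inputs = tokenizer('translate English to German: the weather' ,return_tensors='pt' )
#     out = model.generate(**inputs ,constraints=[PhrasalConstraint(force_ids )] ,num_beams=4 )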
| 708 |
from sklearn.metrics import f1_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute( self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ):
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 657 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : List[Any] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = ['ConditionalDetrFeatureExtractor']
snake_case : str = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
snake_case : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (Trainer ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
                'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
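

# --- Illustration (added): `label_smoothed_nll_loss` is imported dynamically
# above but not defined in this file. A common fairseq-style formulation is
# sketched below; treat it as an assumption about the real `utils` module.
def label_smoothed_nll_loss(lprobs ,target ,epsilon ,ignore_index=-100 ):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1 )
    nll_loss = -lprobs.gather(dim=-1 ,index=target )
    # smoothing term: cross-entropy against a uniform distribution over the vocabulary
    smooth_loss = -lprobs.sum(dim=-1 ,keepdim=True )
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index )
        nll_loss.masked_fill_(pad_mask ,0.0 )
        smooth_loss.masked_fill_(pad_mask ,0.0 )
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1 )
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss ,nll_loss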
| 657 | 0 |
def check_cycle(graph: dict ) -> bool:
    # Keep track of all the visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph ,node ,visited ,rec_stk )
        for node in graph )


def depth_first_search(graph: dict ,vertex: int ,visited: set ,rec_stk: set ) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph ,node ,visited ,rec_stk ):
                return True
        elif node in rec_stk:
            # A back edge to a vertex still on the recursion stack means a cycle
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
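    # Quick sanity checks (added for illustration): a directed 3-cycle is
    # detected, a DAG is not.
    assert check_cycle({0: [1], 1: [2], 2: [0]} ) is True
    assert check_cycle({0: [1, 2], 1: [2], 2: []} ) is False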
| 710 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt(prompt_or_repo_id ,agent_name ,mode="run" ):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s' ,prompt_or_repo_id ) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id ,PROMPT_FILES[mode] ,repo_type='dataset' ,user_agent={'agent': agent_name} )
    with open(prompt_file ,'r' ,encoding='utf-8' ) as f:
        return f.read()
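

# --- Usage sketch (added for illustration; the agent name is arbitrary) ---
#
#     template = download_prompt(None ,agent_name='MyAgent' ,mode='run' )
#     prompt = template.replace('<<task>>' ,'Draw me a picture of rivers and lakes' )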
| 657 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case : List[str] = logging.get_logger(__name__)
def make_batched(videos ):
    if isinstance(videos ,(list, tuple) ) and isinstance(videos[0] ,(list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos

    elif isinstance(videos ,(list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]

    elif is_valid_image(videos ):
        return [[videos]]

    raise ValueError(F'Could not make batched video from {videos}' )
class snake_case_ (BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self :Any ,__snake_case :Any = True ,__snake_case :Optional[Any] = None ,__snake_case :Tuple = PILImageResampling.BILINEAR ,__snake_case :str = True ,__snake_case :Optional[int] = None ,__snake_case :Optional[Any] = True ,__snake_case :Tuple = 1 / 2_55 ,__snake_case :Tuple = True ,__snake_case :Union[str, Any] = None ,__snake_case :Dict = None ,**__snake_case :Optional[int] ,) -> Tuple:
super().__init__(**_A )
a__ = size if size is not None else {'shortest_edge': 2_24}
a__ = get_size_dict(_A ,default_to_square=_A )
a__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
a__ = get_size_dict(_A ,param_name='crop_size' )
a__ = do_resize
a__ = size
a__ = do_center_crop
a__ = crop_size
a__ = resample
a__ = do_rescale
a__ = rescale_factor
a__ = do_normalize
a__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
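    # Illustrative sanity check (not in the original file): the output should
    # start with the familiar leading digits of pi.
    assert pi(10).startswith('3.14159')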
| 657 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
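    # Worked example (not in the original file): by the mass-action law
    # n * p = n_i**2, a silicon-like case with n_i = 1.5e10 and n = 1e16 gives
    # p = n_i**2 / n = 2.25e4.
    print(carrier_concentration(electron_conc=1e16, hole_conc=0, intrinsic_conc=1.5e10))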
| 712 |
def solution(pence: int = 200) -> int:
    """Count the ways to form `pence` pence from standard UK coins (bottom-up DP)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
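    # Illustrative check (not in the original file): 5 pence can be formed in
    # 4 ways (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(5) == 4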
| 657 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards; ordering compares the top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 713 |
from manim import *
class snake_case_ (Scene ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
a__ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
a__ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
a__ = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
a__ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
a__ = os.path.join(A__ , A__ )
logger.info(F'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
a__ = os.path.join(A__ , F'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(F'Saving model to {ckpt_dir}' )
a__ = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Model saved to {ckpt_dir}' )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
a__ = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
a__ = os.path.join(A__ , A__ )
logger.info(F'Loading model from {input_model_file}' )
a__ = torch.load(A__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
a__ = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
a__ = os.path.join(A__ , A__ )
logger.info(F'Loading model from {input_model_file}' )
a__ = torch.load(A__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
a__ = (
os.path.join(A__ , F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
a__ = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
a__ = state_dict['model']
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
a__ = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
a__ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
a__ = os.path.join(A__ , A__ )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
a__ = os.path.join(A__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
a__ = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
a__ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
a__ = os.path.join(A__ , A__ )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
a__ = torch.load(A__ )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
a__ = (
os.path.join(A__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
a__ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(A__ ) , )
a__ = optim_state['optimizer']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
a__ = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
| 714 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    # arc length = circumference * (angle / 360)
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
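    # Cross-check (not in the original file): a 90-degree arc is a quarter of
    # the circumference, so arc_length(90, 10) == 2 * pi * 10 / 4 == 5 * pi.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9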
| 657 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 715 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
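    # Small illustrative check (not in the original file): the 6th prime is 13.
    assert solution(6) == 13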
| 657 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
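# `get_duration` is imported from the local benchmarks `utils` module, which is
# not part of this file. A minimal decorator in the same spirit could look like
# this (illustrative only; the real helper may differ):
#
#     import functools, timeit
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = timeit.default_timer()
#             func(*args, **kwargs)
#             return timeit.default_timer() - start
#         return wrapper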
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, type, length):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, type, length, batch_size):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_0_0_0}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_0_0_0}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 716 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, 'The length of profit and weight must be same.')
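# The greedy knapsack module under test is not part of this file. A minimal
# sketch of a `calc_profit` consistent with the assertions above (illustrative
# only; names and error messages mirror the tests):
def _calc_profit_sketch(profit, weight, max_weight):
    if len(profit) != len(weight):
        raise IndexError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # Take items by best profit/weight ratio first, allowing a fractional last item.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    used, gain = 0, 0.0
    for p, w in items:
        if used + w <= max_weight:
            used, gain = used + w, gain + p
        else:
            gain += (max_weight - used) / w * p
            break
    return gain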
if __name__ == "__main__":
unittest.main()
| 657 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
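# Illustrative usage (not in the original file): a helper typed with these
# aliases accepts a single value, a list of values, or a dict of values.
def _first_item(value: NestedDataStructureLike[int]) -> int:
    if isinstance(value, dict):
        return next(iter(value.values()))
    if isinstance(value, (list, tuple)):
        return value[0]
    return value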
| 717 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=F'failed for {scheduler_func} in normal scheduler', )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'failed for {scheduler_func} in save and reload')
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
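# Short usage sketch (illustrative, not part of the original test module): a
# warmup-then-linear-decay schedule on a toy model, via the same public API the
# tests above exercise. Requires torch to be installed.
if __name__ == '__main__':
    toy_model = nn.Linear(4, 4)
    toy_optimizer = AdamW(toy_model.parameters(), lr=1e-3)
    toy_scheduler = get_linear_schedule_with_warmup(toy_optimizer, num_warmup_steps=2, num_training_steps=10)
    print(unwrap_schedule(toy_scheduler, num_steps=10))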
| 657 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 718 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
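    # Illustrative example (not in the original file); the classic test case,
    # expected to return [10, 22, 33, 41, 60, 80]:
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))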
| 657 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(F'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(F'{class_data_dir}/images').iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1, )

    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)

    with open(F'{class_data_dir}/caption.txt', 'w') as f1, open(F'{class_data_dir}/urls.txt', 'w') as f2, open(
        F'{class_data_dir}/images.txt', 'w') as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(F'{class_data_dir}/images/{total}.jpg', 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(F'{class_data_dir}/images/{total}.jpg' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
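# Example invocation (illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200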
| 719 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 657 | 0 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corners of ring i sum to 4 * odd**2 - 6 * even
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
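    # Illustrative check (not in the original file): the 5x5 spiral from the
    # problem statement has a diagonal sum of 101.
    assert solution(5) == 101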
| 720 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'{func.__name__}({value})'
        timing = timeit(F'__main__.{call}', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'{call:38} -- {timing:.4f} seconds')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
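    # Cross-check (not in the original file): row n of Pascal's triangle equals
    # the binomial coefficients C(n, k).
    from math import comb

    assert generate_pascal_triangle(5)[4] == [comb(4, k) for k in range(5)]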
| 657 | 0 |
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name so that both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fp16:
            args.extend(['--fp16'])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 721 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 657 | 0 |
def prime_sieve_eratosthenes(num: int):
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
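# Illustrative check (the `__main__` block below only runs doctest.testmod()):
# >>> prime_sieve_eratosthenes(10)
# [2, 3, 5, 7]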
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Any = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 700 |
def hexagonal_numbers(length: int):
    if length <= 0 or not isinstance(length , int):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length)]
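# Illustrative output: hexagonal_numbers(length=5) -> [0, 1, 6, 15, 28]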
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 0 |
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float):
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector) - 1)) )
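# Illustrative values (alpha=0.3): positive inputs pass through unchanged, while
# negative inputs are squashed to alpha * (exp(x) - 1), e.g. x = -2 -> ~-0.2594.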
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
def calc_profit(profit: list , weight: list , max_weight: int):
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.' )
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.' )
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
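# Illustrative run: calc_profit([1, 2, 3], [3, 4, 5], 15) -> 6.0
# (all three items fit, total weight 12 <= 15, so the full profit is taken).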
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester (unittest.TestCase ):
    def __init__( self ,parent ,batch_size=13 ,num_channels=3 ,image_size=224 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,) -> None:
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case_ (lowerCamelCase__ , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()
def lowerCamelCase__( self :Optional[Any] ) -> Any:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase ,'image_mean' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'image_std' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'do_normalize' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'do_resize' ) )
self.assertTrue(hasattr(__lowerCamelCase ,'size' ) )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
pass
def lowerCamelCase__( self :int ) -> List[str]:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,Image.Image )
# Test not batched input
a__ = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
a__ = image_processor(__lowerCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
def lowerCamelCase__( self :Any ) -> Optional[Any]:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCamelCase ,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,np.ndarray )
# Test not batched input
a__ = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
a__ = image_processor(__lowerCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCamelCase ,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase ,torch.Tensor )
# Test not batched input
a__ = image_processor(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
# Test batched
a__ = image_processor(__lowerCamelCase ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) ,)
| 702 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x , 3) - (2 * x) - 5
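# The call below applies the secant method to f(x) = x^3 - 2x - 5,
# whose real root is approximately 2.0945514.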
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 0 |
import numpy as np
class snake_case_ :
    def __init__( self ,red=None ,green=None ,blue=None ,red_edge=None ,nir=None ) -> None:
        self.set_matricies(red=red ,green=green ,blue=blue ,red_edge=red_edge ,nir=nir )
    def set_matricies( self ,red=None ,green=None ,blue=None ,red_edge=None ,nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self ,index="" ,red=None ,green=None ,blue=None ,red_edge=None ,nir=None ):
        self.set_matricies(red=red ,green=green ,blue=blue ,red_edge=red_edge ,nir=nir )
        funcs = {
            "ARVI2": self.arv12,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
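# Minimal usage sketch (illustrative; assumes equally-shaped numpy band arrays):
# band = np.ones((2, 2))
# cl = snake_case_(red=band, green=band, blue=band, nir=band)
# cl.calculation("NDVI")  # -> all zeros here, since nir == red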
| 704 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self ,num_train_timesteps: int = 1000 ,trained_betas: Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self ,num_inference_steps: int ,device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas ,dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas ,self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self ,model_output: torch.FloatTensor ,timestep: int ,sample: torch.FloatTensor ,return_dict: bool = True ,) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample ,timestep_index ,prev_timestep_index ,ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self ,sample: torch.FloatTensor ,*args ,**kwargs ) -> torch.FloatTensor:
        return sample
    def _get_prev_sample( self ,sample ,timestep_index ,prev_timestep_index ,ets ):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha ,1E-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
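# Note (added for clarity): the (55, -59, 37, -9)/24 weights in `step` are the
# classic fourth-order Adams-Bashforth multistep coefficients; lower-order
# combinations are used until four model outputs have accumulated in `self.ets`.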
| 657 | 0 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list , row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
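# Illustrative run: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) -> 7
# (path 1 -> 3 -> 1 -> 1 -> 1, moving only right or down).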
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Tuple = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class snake_case_ (PretrainedConfig ):
    model_type = "timesformer"
    def __init__( self ,image_size=224 ,patch_size=16 ,num_channels=3 ,num_frames=8 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1E-6 ,qkv_bias=True ,attention_type="divided_space_time" ,drop_path_rate=0 ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
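# Illustrative usage (names as defined in the class above):
# config = snake_case_(num_frames=16)
# config.model_type -> "timesformer"; config.num_frames -> 16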
| 706 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 0 |
def naive_pattern_search(s: str , pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
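# Naive search runs in O(len(s) * len(pattern)) time; e.g. the assert below
# finds 'DE' starting at index 3 of 'ABCDEFG'.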
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 707 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
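# For each outer_width the loop counts hole widths of matching parity whose
# hollow square lamina uses at most `limit` tiles (outer_width**2 - hole_width**2 <= limit).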
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class snake_case_ :
def __init__( self :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Dict=13 ,__snake_case :str=7 ,__snake_case :Any=True ,__snake_case :Tuple=True ,__snake_case :Tuple=True ,__snake_case :Dict=99 ,__snake_case :List[Any]=32 ,__snake_case :int=5 ,__snake_case :Optional[int]=4 ,__snake_case :Tuple=37 ,__snake_case :Dict="gelu" ,__snake_case :List[Any]=0.1 ,__snake_case :Optional[int]=0.1 ,__snake_case :Any=5_12 ,__snake_case :Dict=16 ,__snake_case :Tuple=2 ,__snake_case :List[Any]=0.02 ,__snake_case :Tuple=3 ,__snake_case :List[str]=4 ,__snake_case :Optional[Any]=None ,) -> str:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Any ) -> Dict:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,*__snake_case :Tuple ) -> Tuple:
a__ = OpenAIGPTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
a__ = model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[Any] ,*__snake_case :int ) -> Dict:
a__ = OpenAIGPTLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :List[Any] ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ,*__snake_case :Union[str, Any] ) -> int:
a__ = OpenAIGPTDoubleHeadsModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :Tuple ,__snake_case :List[Any] ,*__snake_case :List[Any] ) -> Dict:
a__ = self.num_labels
a__ = OpenAIGPTForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class snake_case_ (A_ , A_ , A_ , unittest.TestCase ):
UpperCAmelCase__ : Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Dict = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase__ : List[Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__( self :int ,__snake_case :Tuple ,__snake_case :int ,__snake_case :str ,__snake_case :List[Any] ,__snake_case :int ) -> Any:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict ,model_class ,return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=torch_device ,)
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def lowerCamelCase__( self :Dict ) -> str:
a__ = OpenAIGPTModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__snake_case )
def lowerCamelCase__( self :Any ) -> List[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__snake_case )
@slow
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = OpenAIGPTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(__snake_case )
a__ = torch.tensor([[4_81, 47_35, 5_44]] ,dtype=torch.long ,device=__snake_case ) # the president is
a__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 708 |
from sklearn.metrics import f1_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
    def _compute( self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ):
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 657 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Tuple = logging.get_logger(__name__)
snake_case : str = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class snake_case_ (PretrainedConfig ):
    model_type = "wav2vec2"
def __init__( self :Dict ,__snake_case :List[str]=32 ,__snake_case :List[Any]=7_68 ,__snake_case :int=12 ,__snake_case :Optional[int]=12 ,__snake_case :int=30_72 ,__snake_case :List[str]="gelu" ,__snake_case :Any=0.1 ,__snake_case :Optional[Any]=0.1 ,__snake_case :str=0.1 ,__snake_case :int=0.0 ,__snake_case :Optional[int]=0.0 ,__snake_case :List[Any]=0.1 ,__snake_case :int=0.1 ,__snake_case :str=0.02 ,__snake_case :List[Any]=1E-5 ,__snake_case :List[str]="group" ,__snake_case :List[str]="gelu" ,__snake_case :str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) ,__snake_case :Optional[Any]=(5, 2, 2, 2, 2, 2, 2) ,__snake_case :Dict=(10, 3, 3, 3, 3, 2, 2) ,__snake_case :List[str]=False ,__snake_case :int=1_28 ,__snake_case :Tuple=16 ,__snake_case :List[str]=False ,__snake_case :Union[str, Any]=True ,__snake_case :Dict=0.05 ,__snake_case :Tuple=10 ,__snake_case :Optional[int]=2 ,__snake_case :int=0.0 ,__snake_case :List[Any]=10 ,__snake_case :List[str]=0 ,__snake_case :Tuple=3_20 ,__snake_case :int=2 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Tuple=1_00 ,__snake_case :List[str]=2_56 ,__snake_case :Optional[Any]=2_56 ,__snake_case :Dict=0.1 ,__snake_case :Union[str, Any]="sum" ,__snake_case :Optional[Any]=False ,__snake_case :Optional[int]=False ,__snake_case :List[str]=2_56 ,__snake_case :Optional[int]=(5_12, 5_12, 5_12, 5_12, 15_00) ,__snake_case :List[Any]=(5, 3, 3, 1, 1) ,__snake_case :Optional[Any]=(1, 2, 3, 1, 1) ,__snake_case :Dict=5_12 ,__snake_case :int=0 ,__snake_case :str=1 ,__snake_case :List[Any]=2 ,__snake_case :Any=False ,__snake_case :Dict=3 ,__snake_case :Union[str, Any]=2 ,__snake_case :Optional[int]=3 ,__snake_case :Tuple=None ,__snake_case :Dict=None ,**__snake_case :Union[str, Any] ,) -> Dict:
super().__init__(**UpperCamelCase__ ,pad_token_id=UpperCamelCase__ ,bos_token_id=UpperCamelCase__ ,eos_token_id=UpperCamelCase__ )
a__ = hidden_size
a__ = feat_extract_norm
a__ = feat_extract_activation
a__ = list(UpperCamelCase__ )
a__ = list(UpperCamelCase__ )
a__ = list(UpperCamelCase__ )
a__ = conv_bias
a__ = num_conv_pos_embeddings
a__ = num_conv_pos_embedding_groups
a__ = len(self.conv_dim )
a__ = num_hidden_layers
a__ = intermediate_size
a__ = hidden_act
a__ = num_attention_heads
a__ = hidden_dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = feat_proj_dropout
a__ = final_dropout
a__ = layerdrop
a__ = layer_norm_eps
a__ = initializer_range
a__ = vocab_size
a__ = do_stable_layer_norm
a__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ = apply_spec_augment
a__ = mask_time_prob
a__ = mask_time_length
a__ = mask_time_min_masks
a__ = mask_feature_prob
a__ = mask_feature_length
a__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
a__ = num_codevectors_per_group
a__ = num_codevector_groups
a__ = contrastive_logits_temperature
a__ = feat_quantizer_dropout
a__ = num_negatives
a__ = codevector_dim
a__ = proj_codevector_dim
a__ = diversity_loss_weight
# ctc loss
a__ = ctc_loss_reduction
a__ = ctc_zero_infinity
# adapter
a__ = add_adapter
a__ = adapter_kernel_size
a__ = adapter_stride
a__ = num_adapter_layers
a__ = output_hidden_size or hidden_size
a__ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a__ = list(UpperCamelCase__ )
a__ = list(UpperCamelCase__ )
a__ = list(UpperCamelCase__ )
a__ = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
return functools.reduce(operator.mul ,self.conv_stride ,1 )
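# Note: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the property above evaluates
# to 5 * 2**6 = 320, i.e. one encoder frame per 320 raw samples (~20 ms at 16 kHz audio).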
| 709 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
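# Maps the `--lr_scheduler` CLI choice onto the matching schedule factory above.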
class snake_case_ (Trainer ):
    def __init__( self ,config=None ,data_args=None ,*args ,**kwargs ):
        super().__init__(*args ,**kwargs )

        if config is None:
            assert isinstance(self.model ,PreTrainedModel ), (
                'If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is'
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config ,FSMTConfig ) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                'Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss'
                ' calculation or doing label smoothing.'
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding..' )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self ,num_training_steps ):
if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(params=optimizer_grouped_parameters ,optim=optimizer_cls ,**optimizer_kwargs )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters ,**optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler( self ,num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self ,model ,inputs ,labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs ,use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs ,labels=labels ,use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs ,use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits ,dim=-1 )
            loss, _ = self.loss_fn(lprobs ,labels ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self ,model ,inputs ):
        labels = inputs.pop('labels' )
        loss, _ = self._compute_loss(model ,inputs ,labels )
        return loss
    def prediction_step(
        self ,model: nn.Module ,inputs: Dict[str, Union[torch.Tensor, Any]] ,prediction_loss_only: bool ,ignore_keys: Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**gen_kwargs ,)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens ,gen_kwargs['max_length'] )

        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model ,inputs ,labels )

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels ,gen_kwargs['max_length'] )

        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self ,tensor ,max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 657 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Any = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class snake_case_ (PretrainedConfig ):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self ,vocab_size=250112 ,d_model=512 ,d_kv=64 ,d_ff=1024 ,num_layers=8 ,num_decoder_layers=None ,num_heads=6 ,relative_attention_num_buckets=32 ,relative_attention_max_distance=128 ,dropout_rate=0.1 ,layer_norm_epsilon=1E-6 ,initializer_factor=1.0 ,feed_forward_proj="gated-gelu" ,is_encoder_decoder=True ,use_cache=True ,tokenizer_class="T5Tokenizer" ,tie_word_embeddings=True ,pad_token_id=0 ,eos_token_id=1 ,decoder_start_token_id=0 ,**kwargs ,):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder ,tokenizer_class=tokenizer_class ,tie_word_embeddings=tie_word_embeddings ,pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
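        # Backwards compatibility: the legacy 'gated-gelu' name selects the 'gelu_new' kernel.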
@property
    def hidden_size( self ) -> int:
return self.d_model
@property
    def num_attention_heads( self ) -> int:
return self.num_heads
@property
    def num_hidden_layers( self ) -> int:
return self.num_layers
class snake_case_ (OnnxSeqaSeqConfigWithPast ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='inputs' )

        return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
return 13
@property
    def atol_for_validation( self ) -> float:
return 5E-4
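# The properties above pin ONNX export to opset 13 and use 5e-4 as the absolute
# tolerance when validating exported outputs against the eager model.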
| 710 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''

DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id=None, agent_name='Agent', mode='run'):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name} )
    with open(prompt_file, 'r', encoding='utf-8' ) as f:
        return f.read()
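# Example: download_prompt(None, 'MyAgent', mode='chat') fetches 'chat_prompt_template.txt'
# from the default dataset repo, while passing literal prompt text returns it unchanged.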
| 657 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
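# Registering the `_LazyModule` in sys.modules defers the heavy torch/tokenizers
# imports until an attribute such as `LlamaModel` is first accessed.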
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term contributes ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)

        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
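# Sanity check: pi(10) starts '3.14159265' -- the [:-1] drops one final guard digit.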
if __name__ == "__main__":
    n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 0 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case_ (ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    _keys_to_ignore_on_load_unexpected = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self :int ,__snake_case :int ,__snake_case :int ,__snake_case :Optional[int] = None ,__snake_case :int = 5_02_57 ,__snake_case :int = 10_24 ,__snake_case :int = 7_68 ,__snake_case :int = 12 ,__snake_case :int = 12 ,__snake_case :Optional[int] = None ,__snake_case :str = "gelu_new" ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 1E-5 ,__snake_case :float = 0.02 ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :bool = False ,__snake_case :bool = False ,) -> List[Any]:
super().__init__()
a__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
F' `n_embd`: {n_embd} are not equal.' )
a__ = prefix_inner_dim
a__ = prefix_hidden_dim
a__ = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
a__ = (
nn.Linear(self.prefix_hidden_dim ,lowerCAmelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
a__ = GPTaConfig(
vocab_size=lowerCAmelCase_ ,n_positions=lowerCAmelCase_ ,n_embd=lowerCAmelCase_ ,n_layer=lowerCAmelCase_ ,n_head=lowerCAmelCase_ ,n_inner=lowerCAmelCase_ ,activation_function=lowerCAmelCase_ ,resid_pdrop=lowerCAmelCase_ ,embd_pdrop=lowerCAmelCase_ ,attn_pdrop=lowerCAmelCase_ ,layer_norm_epsilon=lowerCAmelCase_ ,initializer_range=lowerCAmelCase_ ,scale_attn_weights=lowerCAmelCase_ ,use_cache=lowerCAmelCase_ ,scale_attn_by_inverse_layer_idx=lowerCAmelCase_ ,reorder_and_upcast_attn=lowerCAmelCase_ ,)
a__ = GPTaLMHeadModel(lowerCAmelCase_ )
    def forward( self ,input_ids: torch.Tensor ,prefix_embeds: torch.Tensor ,attention_mask: Optional[torch.Tensor] = None ,labels: Optional[torch.Tensor] = None ,):
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) ,dim=1 )

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
            labels = torch.cat((dummy_token, input_ids) ,dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat ,labels=labels ,attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self ,batch_size: int ,device: torch.device ) -> torch.Tensor:
        return torch.zeros(batch_size ,self.prefix_length ,dtype=torch.int64 ,device=device )
    def encode( self ,prefix ):
        return self.encode_prefix(prefix )
@torch.no_grad()
    def generate_captions( self ,features ,eos_token_id ,device ):
        features = torch.split(features ,1 ,dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) ) # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature ,device=device ,eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self ,__snake_case :List[str]=None ,__snake_case :Tuple=None ,__snake_case :str=None ,__snake_case :int = 5 ,__snake_case :int = 67 ,__snake_case :float = 1.0 ,__snake_case :Optional[int] = None ,) -> Union[str, Any]:
a__ = eos_token_id
a__ = None
a__ = None
a__ = torch.ones(lowerCAmelCase_ ,device=lowerCAmelCase_ ,dtype=torch.int )
a__ = torch.zeros(lowerCAmelCase_ ,device=lowerCAmelCase_ ,dtype=torch.bool )
if input_embeds is not None:
a__ = input_embeds
else:
a__ = self.transformer.transformer.wte(lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
a__ = self.transformer(inputs_embeds=lowerCAmelCase_ )
a__ = outputs.logits
a__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
a__ = logits.softmax(-1 ).log()
if scores is None:
a__ , a__ = logits.topk(lowerCAmelCase_ ,-1 )
a__ = generated.expand(lowerCAmelCase_ ,*generated.shape[1:] )
a__ , a__ = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
a__ = next_tokens
else:
a__ = tokens.expand(lowerCAmelCase_ ,*tokens.shape[1:] )
a__ = torch.cat((tokens, next_tokens) ,dim=1 )
else:
a__ = -float(np.inf )
a__ = 0
a__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
a__ = scores_sum / seq_lengths[:, None]
a__ , a__ = scores_sum_average.view(-1 ).topk(lowerCAmelCase_ ,-1 )
a__ = next_tokens // scores_sum.shape[1]
a__ = seq_lengths[next_tokens_source]
a__ = next_tokens % scores_sum.shape[1]
a__ = next_tokens.unsqueeze(1 )
a__ = tokens[next_tokens_source]
a__ = torch.cat((tokens, next_tokens) ,dim=1 )
a__ = generated[next_tokens_source]
a__ = scores_sum_average * seq_lengths
a__ = is_stopped[next_tokens_source]
a__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
a__ = torch.cat((generated, next_token_embed) ,dim=1 )
a__ = is_stopped + next_tokens.eq(lowerCAmelCase_ ).squeeze()
if is_stopped.all():
break
a__ = scores / seq_lengths
a__ = scores.argsort(descending=lowerCAmelCase_ )
# tokens tensors are already padded to max_seq_length
a__ = [tokens[i] for i in order]
a__ = torch.stack(lowerCAmelCase_ ,dim=0 )
a__ = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
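# The beam search above keeps a fixed set of candidate prefixes, ranks them by
# length-normalized cumulative log-probability, and freezes a beam once it emits EOS.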
| 712 |
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
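# Looping over coins in the outer loop counts each *combination* exactly once; swapping
# the loops would count ordered sequences (permutations) instead.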
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case_ (unittest.TestCase ):
    def check_results_dict_not_empty( self ,results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
def lowerCamelCase__( self :str ) -> List[str]:
a__ = '''sshleifer/tiny-gpt2'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :int ) -> List[Any]:
a__ = '''sgugger/tiny-distilbert-classification'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,only_pretrain_model=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Dict ) -> str:
a__ = '''sshleifer/tiny-gpt2'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,torchscript=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = '''sshleifer/tiny-gpt2'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,fpaa=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ = '''sshleifer/tiny-gpt2'''
a__ = AutoConfig.from_pretrained(_UpperCAmelCase )
# set architectures equal to `None`
a__ = None
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :int ) -> str:
a__ = '''sshleifer/tiny-gpt2'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def lowerCamelCase__( self :Dict ) -> Any:
a__ = '''sshleifer/tiny-gpt2'''
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=_UpperCAmelCase ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
a__ = '''sshleifer/tiny-gpt2'''
a__ = AutoConfig.from_pretrained(_UpperCAmelCase )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ = '''sshleifer/tinier_bart'''
a__ = AutoConfig.from_pretrained(_UpperCAmelCase )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase__( self :Dict ) -> Any:
a__ = '''sshleifer/tiny-gpt2'''
a__ = AutoConfig.from_pretrained(_UpperCAmelCase )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :Tuple ) -> Tuple:
a__ = '''sshleifer/tinier_bart'''
a__ = AutoConfig.from_pretrained(_UpperCAmelCase )
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase ,configs=[config] )
a__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase__( self :Optional[int] ) -> Dict:
a__ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,save_to_csv=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_UpperCAmelCase ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(_UpperCAmelCase ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(_UpperCAmelCase ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(_UpperCAmelCase ,'train_time.csv' ) ,env_info_csv_file=os.path.join(_UpperCAmelCase ,'env.csv' ) ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'env.csv' ) ).exists() )
def lowerCamelCase__( self :List[str] ) -> Any:
a__ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__snake_case :Union[str, Any] ):
self.assertTrue(hasattr(_UpperCAmelCase ,'sequential' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'cumulative' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'current' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=_UpperCAmelCase ,inference=_UpperCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_UpperCAmelCase ,'log.txt' ) ,log_print=_UpperCAmelCase ,trace_memory_line_by_line=_UpperCAmelCase ,multi_process=_UpperCAmelCase ,)
a__ = PyTorchBenchmark(_UpperCAmelCase )
a__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase ,'log.txt' ) ).exists() )
| 713 |
from manim import *
class snake_case_ (Scene ):
    def construct( self ):
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
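# The scene animates accelerate-style offloading: as the input crosses each layer,
# that layer's weights are pulled from CPU to GPU and returned once it has run.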
| 657 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class snake_case_ (tf.keras.layers.Layer ):
    def __init__( self ,vocab_size ,d_embed ,d_proj ,cutoffs ,div_val=1 ,keep_order=False ,**kwargs ):
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
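        # `cutoffs` split the vocabulary into a frequent head plus progressively rarer
        # tail clusters; with div_val > 1 each tail cluster uses a smaller embedding width.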
    def build( self ,input_shape ):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) ,initializer='zeros' ,trainable=True ,name='cluster_weight' )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) ,initializer='zeros' ,trainable=True ,name='cluster_bias' )

        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) ,initializer='zeros' ,trainable=True ,name=f'out_projs_._{i}' ,)
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._weight' ,)
                bias = self.add_weight(
                    shape=(self.vocab_size,) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._bias' ,)
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj) ,initializer='zeros' ,trainable=True ,name=f'out_projs_._{i}' )
                self.out_projs.append(weight )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._weight' ,)
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) ,initializer='zeros' ,trainable=True ,name=f'out_layers_._{i}_._bias' ,)
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
@staticmethod
    def _logit( x ,W ,b ,proj=None ):
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe' ,y ,proj )
        return tf.einsum('ibd,nd->ibn' ,y ,W ) + b
@staticmethod
    def _gather_logprob( logprob ,target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] ,dtype=target.dtype )
        idx = tf.stack([r, target] ,1 )
        return tf.gather_nd(logprob ,idx )
    def call( self ,hidden ,target ,return_mean=True ,training=False ):
a__ = 0
if self.n_clusters == 0:
a__ = self._logit(_A ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] )
if target is not None:
a__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_A ,logits=_A )
a__ = tf.nn.log_softmax(_A ,axis=-1 )
else:
a__ = shape_list(_A )
a__ = []
a__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
a__ , a__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
a__ = (target >= l_idx) & (target < r_idx)
a__ = tf.where(_A )
a__ = tf.boolean_mask(_A ,_A ) - l_idx
if self.div_val == 1:
a__ = self.out_layers[0][0][l_idx:r_idx]
a__ = self.out_layers[0][1][l_idx:r_idx]
else:
a__ = self.out_layers[i][0]
a__ = self.out_layers[i][1]
if i == 0:
a__ = tf.concat([cur_W, self.cluster_weight] ,0 )
a__ = tf.concat([cur_b, self.cluster_bias] ,0 )
a__ = self._logit(_A ,_A ,_A ,self.out_projs[0] )
a__ = tf.nn.log_softmax(_A )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
a__ = tf.boolean_mask(_A ,_A )
a__ = self._gather_logprob(_A ,_A )
else:
a__ = self._logit(_A ,_A ,_A ,self.out_projs[i] )
a__ = tf.nn.log_softmax(_A )
a__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
a__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_A )
if target is not None:
a__ = tf.boolean_mask(_A ,_A )
a__ = tf.boolean_mask(_A ,_A )
a__ = self._gather_logprob(_A ,_A )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_A ,-cur_logprob ,shape_list(_A ) )
a__ = tf.concat(_A ,axis=-1 )
if target is not None:
if return_mean:
a__ = tf.reduce_mean(_A )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_A )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_A ,name=self.name ,aggregation='mean' if return_mean else '' )
return out
| 714 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc spanning `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 360)
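# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.71.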
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 0 |
def odd_even_sort(input_list: list) -> list:
    """In-place odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
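# Each pass compares disjoint pairs, so the swaps parallelize well even though the
# serial complexity is O(n^2).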
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 715 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
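# Project Euler 7: solution() evaluates to 104743, the 10001st prime.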
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET + '_')[-1].split('_')
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += '_' + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
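# The in-place update implements W <- W0 + alpha * (lora_up @ lora_down), i.e. the
# merging ratio advertised by the --alpha flag below.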
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 716 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
def lowerCamelCase__( self :str ) -> Optional[int]:
        self.assertRaisesRegex(ValueError ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
        self.assertRaisesRegex(ValueError ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
        self.assertRaisesRegex(ValueError ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
        self.assertRaisesRegex(ValueError ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
        self.assertRaisesRegex(
            IndexError ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class snake_case_ (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
def __init__( self :Tuple ,__snake_case :Any=None ,__snake_case :List[str]=None ,__snake_case :List[Any]=True ,__snake_case :str="[UNK]" ,__snake_case :Optional[Any]="[SEP]" ,__snake_case :Union[str, Any]="[PAD]" ,__snake_case :Optional[int]="[CLS]" ,__snake_case :Optional[Any]="[MASK]" ,__snake_case :Union[str, Any]=True ,__snake_case :Optional[int]=None ,**__snake_case :str ,) -> Optional[Any]:
super().__init__(
__snake_case ,tokenizer_file=__snake_case ,do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,tokenize_chinese_chars=__snake_case ,strip_accents=__snake_case ,**__snake_case ,)
a__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' ,__snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__snake_case ) != tokenize_chinese_chars
):
a__ = getattr(__snake_case ,normalizer_state.pop('type' ) )
a__ = do_lower_case
a__ = strip_accents
a__ = tokenize_chinese_chars
a__ = normalizer_class(**__snake_case )
a__ = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
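    # The method above emits the BERT-style mask: [0]*len(cls + A + sep) + [1]*len(B + sep).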
    def save_vocabulary( self ,save_directory ,filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 717 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
    def assertListAlmostEqual( self ,list1 ,list2 ,tol ):
        self.assertEqual(len(list1 ) ,len(list2 ) )
        for a, b in zip(list1 ,list2 ):
            self.assertAlmostEqual(a ,b ,delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
        for _ in range(100 ):
            # calculate loss, update w through backprop
            loss = criterion(w ,target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,beta1=None ,weight_decay=0.0 ,relative_step=False ,scale_parameter=False ,warmup_init=False ,)
        for _ in range(1000 ):
            loss = criterion(w ,target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class ScheduleInitTest (unittest.TestCase ):
m = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
num_steps = 1_0
def assertListAlmostEqual( self :Optional[Any] ,list1 :Optional[int] ,list2 :Tuple ,tol :int ,msg :Any=None ) -> Optional[Any]:
self.assertEqual(len(list1 ) ,len(list2 ) )
for a, b in zip(list1 ,list2 ):
self.assertAlmostEqual(a ,b ,delta=tol ,msg=msg )
def test_schedulers( self :Tuple ) -> List[Any]:
common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
kwargs, expected_learning_rates = data
scheduler = scheduler_func(self.optimizer ,**kwargs )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
lrs_1 = unwrap_schedule(scheduler ,self.num_steps )
self.assertListAlmostEqual(
lrs_1 ,expected_learning_rates ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
scheduler = scheduler_func(self.optimizer ,**kwargs )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
lrs_2 = unwrap_and_save_reload_schedule(scheduler ,self.num_steps )
self.assertListEqual(lrs_1 ,lrs_2 ,msg=F'failed for {scheduler_func} in save and reload' )
class LambdaScheduleWrapper:
def __init__( self :Tuple ,fn :str ) -> Any:
self.fn = fn
def __call__( self :List[str] ,*args :Optional[Any] ,**kwargs :Optional[int] ) -> Union[str, Any]:
return self.fn(*args ,**kwargs )
@classmethod
def wrap_scheduler( cls ,scheduler :Union[str, Any] ) -> Dict:
scheduler.lr_lambdas = list(map(cls ,scheduler.lr_lambdas ) )
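# Usage sketch (added note): LR lambdas created inside the `get_*_schedule*`
# helpers are closures and cannot be pickled; wrapping them in this callable
# class makes `torch.save(scheduler.state_dict())` round-trip, e.g.:
#   LambdaScheduleWrapper.wrap_scheduler(scheduler)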
| 657 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
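# Note (added): once _LazyModule is installed in sys.modules, an attribute access
# such as `from transformers.models.efficientnet import EfficientNetModel`
# triggers the real import only at first use, keeping `import transformers` cheap.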
| 718 |
from __future__ import annotations
def longest_subsequence ( array : list[int] ) -> list[int]: # This function is recursive
array_length = len(array )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
pivot = array[0]
is_found = False
i = 1
longest_subseq = []
while not is_found and i < array_length:
if array[i] < pivot:
is_found = True
temp_array = [element for element in array[i:] if element >= array[i]]
temp_array = longest_subsequence(temp_array )
if len(temp_array ) > len(longest_subseq ):
longest_subseq = temp_array
else:
i += 1
temp_array = [element for element in array[1:] if element >= pivot]
temp_array = [pivot, *longest_subsequence(temp_array )]
if len(temp_array ) > len(longest_subseq ):
return temp_array
else:
return longest_subseq
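# Illustrative check (added, mirroring the upstream doctest): the recursion
# returns the longest non-decreasing subsequence of its input.
assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]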
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
def __init__( self :Optional[Any] ,parent ,batch_size=2 ,image_size=32 ,patch_size=16 ,num_channels=3 ,is_training=True ,use_labels=True ,hidden_size=32 ,num_hidden_layers=4 ,backbone_out_indices=[0, 1, 2, 3] ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,initializer_range=0.02 ,num_labels=3 ,backbone_featmap_shape=[1, 3_84, 24, 24] ,is_hybrid=True ,scope=None ,) -> Any:
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.backbone_out_indices = backbone_out_indices
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.backbone_featmap_shape = backbone_featmap_shape
self.scope = scope
self.is_hybrid = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs( self :Any ) -> Union[str, Any]:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self :List[Any] ) -> Union[str, Any]:
backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=backbone_config ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def create_and_check_model( self :Optional[int] ,config ,pixel_values ,labels ) -> Optional[int]:
model = DPTModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_depth_estimation( self :Tuple ,config ,pixel_values ,labels ) -> List[str]:
config.num_labels = self.num_labels
model = DPTForDepthEstimation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
def create_and_check_for_semantic_segmentation( self :Optional[int] ,config ,pixel_values ,labels ) -> Any:
config.num_labels = self.num_labels
model = DPTForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values ,labels=labels )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def prepare_config_and_inputs_for_common( self :Union[str, Any] ) -> str:
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
pipeline_model_mapping = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self :str ) -> Optional[int]:
self.model_tester = DPTModelTester(self )
self.config_tester = ConfigTester(self ,config_class=DPTConfig ,has_text_modality=False ,hidden_size=37 )
def test_config( self :List[Any] ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def test_inputs_embeds( self :Dict ) -> int:
pass
def test_model_common_attributes( self :Optional[Any] ) -> Optional[int]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def test_forward_signature( self :List[str] ) -> Any:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,expected_arg_names )
def test_model( self :Tuple ) -> Dict:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_depth_estimation( self :List[Any] ) -> Any:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
def test_for_semantic_segmentation( self :List[str] ) -> Optional[int]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def test_training( self :Dict ) -> Dict:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class in get_values(MODEL_MAPPING ):
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_training_gradient_checkpointing( self :Tuple ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
model = model_class(config )
model.to(torch_device )
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_initialization( self :Tuple ) -> Optional[int]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
# Skip the check for the backbone
backbone_params = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
pass
@slow
def test_model_from_pretrained( self :str ) -> Any:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
model = DPTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_raise_readout_type( self :Any ) -> Union[str, Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.readout_type = '''add'''
with self.assertRaises(ValueError ):
model = DPTForDepthEstimation(config )
def prepare_img ( ):
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest (unittest.TestCase ):
def test_inference_depth_estimation( self :Tuple ) -> str:
image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
image = prepare_img()
inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape ,expected_shape )
expected_slice = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 ,expected_slice ,atol=1E-4 ) )
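# Note (added): the integration test only verifies a 3x3 corner of the 384x384
# depth map (scaled by 1/100) against reference values, with atol=1e-4.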
| 719 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class GLPNImageProcessor (BaseImageProcessor ):
model_input_names = ['''pixel_values''']
def __init__( self :Optional[Any] ,do_resize :bool = True ,size_divisor :int = 32 ,resample=PILImageResampling.BILINEAR ,do_rescale :bool = True ,**kwargs :Tuple ,) -> None:
self.do_resize = do_resize
self.do_rescale = do_rescale
self.size_divisor = size_divisor
self.resample = resample
super().__init__(**kwargs )
def resize( self :Union[str, Any] ,image :np.ndarray ,size_divisor :int ,resample ,data_format :Optional[ChannelDimension] = None ,**kwargs :List[Any] ) -> np.ndarray:
height , width = get_image_size(image )
# Rounds the height and width down to the closest multiple of size_divisor
new_h = height // size_divisor * size_divisor
new_w = width // size_divisor * size_divisor
image = resize(image ,(new_h, new_w) ,resample=resample ,data_format=data_format ,**kwargs )
return image
def rescale( self :List[str] ,image :np.ndarray ,scale :float ,data_format :Optional[ChannelDimension] = None ,**kwargs :str ) -> np.ndarray:
return rescale(image=image ,scale=scale ,data_format=data_format ,**kwargs )
def preprocess( self :Tuple ,images :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,do_resize :Optional[bool] = None ,do_rescale :Optional[bool] = None ,size_divisor :Optional[int] = None ,resample=None ,return_tensors :Optional[Union[TensorType, str]] = None ,data_format :ChannelDimension = ChannelDimension.FIRST ,**kwargs :List[Any] ,) -> BatchFeature:
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
images = [to_numpy_array(img ) for img in images]
if do_resize:
images = [self.resize(image ,size_divisor=size_divisor ,resample=resample ) for image in images]
if do_rescale:
images = [self.rescale(image ,scale=1 / 2_55 ) for image in images]
images = [to_channel_dimension_format(image ,data_format ) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data ,tensor_type=return_tensors )
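# Self-contained sketch (assumed helper name, added for illustration) of the
# size_divisor rounding performed by the resize method above:
def _round_down_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple:
    # Integer division drops the remainder, so each side shrinks to the
    # nearest smaller multiple of size_divisor.
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

assert _round_down_to_multiple(519, 777) == (512, 768)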
| 657 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
snake_case : int = logging.get_logger(__name__)
class MCTCTFeatureExtractor (SequenceFeatureExtractor ):
model_input_names = ['''input_features''', '''attention_mask''']
def __init__( self :Optional[int] ,feature_size=80 ,sampling_rate=1_60_00 ,padding_value=0.0 ,hop_length=10 ,win_length=25 ,win_function="hamming_window" ,frame_signal_scale=3_27_68.0 ,preemphasis_coeff=0.97 ,mel_floor=1.0 ,normalize_means=True ,normalize_vars=True ,return_attention_mask=False ,**kwargs ,) -> Optional[int]:
super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.hop_length = hop_length
self.win_length = win_length
self.frame_signal_scale = frame_signal_scale
self.preemphasis_coeff = preemphasis_coeff
self.mel_floor = mel_floor
self.normalize_means = normalize_means
self.normalize_vars = normalize_vars
self.win_function = win_function
self.return_attention_mask = return_attention_mask
self.sample_size = win_length * sampling_rate // 10_00
self.sample_stride = hop_length * sampling_rate // 10_00
self.n_fft = optimal_fft_length(self.sample_size )
self.n_freqs = (self.n_fft // 2) + 1
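# Worked example (added): with the defaults win_length=25 ms, hop_length=10 ms
# and sampling_rate=16000, the derived sizes are
#   sample_size   = 25 * 16000 // 1000 = 400 samples per frame,
#   sample_stride = 10 * 16000 // 1000 = 160 samples between frames,
#   n_fft = optimal_fft_length(400) = 512 (next power of two), n_freqs = 257.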
def _extract_mfsc_features( self :List[Any] ,one_waveform ) -> Dict:
if self.win_function == "hamming_window":
window = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=False )
else:
window = window_function(window_length=self.sample_size ,name=self.win_function )
fbanks = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
msfc_features = spectrogram(
one_waveform * self.frame_signal_scale ,window=window ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=False ,preemphasis=self.preemphasis_coeff ,mel_filters=fbanks ,mel_floor=self.mel_floor ,log_mel='log' ,)
return msfc_features.T
def _normalize_one( self :int ,x ,input_length ,padding_value ) -> np.ndarray:
# make sure we normalize float32 arrays
if self.normalize_means:
mean = x[:input_length].mean(axis=0 )
x = np.subtract(x ,mean )
if self.normalize_vars:
std = x[:input_length].std(axis=0 )
x = np.divide(x ,std )
if input_length < x.shape[0]:
x[input_length:] = padding_value
# make sure array is in float32
x = x.astype(np.float32 )
return x
def normalize( self :Optional[Any] ,input_features ,attention_mask = None ) -> List[np.ndarray]:
lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(x ,n ,self.padding_value ) for x, n in zip(input_features ,lengths )]
def __call__( self :List[Any] ,raw_speech ,padding = False ,max_length = None ,truncation = False ,pad_to_multiple_of = None ,return_attention_mask = None ,return_tensors = None ,sampling_rate = None ,**kwargs ,) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
is_batched_numpy = isinstance(raw_speech ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
is_batched = is_batched_numpy or (
isinstance(raw_speech ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
raw_speech = [np.asarray(speech ,dtype=np.float32 ) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech ,np.ndarray ):
raw_speech = np.asarray(raw_speech ,dtype=np.float32 )
elif isinstance(raw_speech ,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
raw_speech = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
raw_speech = [raw_speech]
# extract fbank features
features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
# convert into correct format for padding
encoded_inputs = BatchFeature({'input_features': features} )
padded_inputs = self.pad(
encoded_inputs ,padding=padding ,max_length=max_length ,truncation=truncation ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,**kwargs ,)
# make sure list is in array format
input_features = padded_inputs.get('input_features' )
if isinstance(input_features[0] ,list ):
padded_inputs['input_features'] = [np.asarray(feature ,dtype=np.float32 ) for feature in input_features]
attention_mask = padded_inputs.get('attention_mask' )
if attention_mask is not None:
padded_inputs['attention_mask'] = [np.asarray(array ,dtype=np.int32 ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
attention_mask = (
np.array(attention_mask ,dtype=np.int32 )
if self._get_padding_strategies(padding ,max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
padded_inputs['input_features'] = self.normalize(
padded_inputs['input_features'] ,attention_mask=attention_mask )
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
return padded_inputs
| 720 |
def print_pascal_triangle ( num_rows : int ):
triangle = generate_pascal_triangle(num_rows )
for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def generate_pascal_triangle ( num_rows : int ):
if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
triangle = []
for current_row_idx in range(num_rows ):
current_row = populate_current_row(triangle , current_row_idx )
triangle.append(current_row )
return triangle
def populate_current_row ( triangle : list[list[int]] , current_row_idx : int ):
current_row = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
current_row[0] , current_row[-1] = 1, 1
for current_col_idx in range(1 , current_row_idx ):
calculate_current_element(
triangle , current_row , current_row_idx , current_col_idx )
return current_row
def calculate_current_element ( triangle : list[list[int]] , current_row : list[int] , current_row_idx : int , current_col_idx : int , ):
above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized ( num_rows : int ):
if not isinstance(num_rows , int ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
result = [[1]]
for row_index in range(1 , num_rows ):
temp_row = [0] + result[-1] + [0]
row_length = row_index + 1
# Calculate the number of distinct elements in a row
distinct_elements = sum(divmod(row_length , 2 ) )
row_first_half = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
row_second_half = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
row = row_first_half + row_second_half
result.append(row )
return result
def benchmark ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(func : Callable , value : int ) -> None:
call = F'{func.__name__}({value})'
timing = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester (unittest.TestCase ):
def setUp( self :Optional[Any] ) -> Union[str, Any]:
mod_file = inspect.getfile(accelerate.test_utils )
self.test_file_path = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
self.test_metrics = test_metrics
@require_cpu
def test_metric_cpu_noop( self :Optional[int] ) -> Optional[int]:
debug_launcher(self.test_metrics.main ,num_processes=1 )
@require_cpu
def test_metric_cpu_multi( self :Any ) -> List[Any]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def test_metric_gpu( self :int ) -> Union[str, Any]:
self.test_metrics.main()
@require_multi_gpu
def test_metric_gpu_multi( self :List[Any] ) -> Tuple:
print(F'Found {torch.cuda.device_count()} devices.' )
cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd ,env=os.environ.copy() )
| 721 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand ( ):
play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def generate_random_hands ( number_of_hands : int = 1_0_0 ):
return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush ( hand , expected ):
assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight ( hand , expected ):
assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight ( hand , expected , card_values ):
player = PokerHand(hand )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind ( hand , expected ):
assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values ( hand , expected ):
assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple ( hand , other , expected ):
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random ( hand , other , expected ):
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted ( ):
poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
list_copy = poker_hands.copy()
shuffle(list_copy )
user_sorted = chain(sorted(list_copy ) )
for index, hand in enumerate(user_sorted ):
assert hand == poker_hands[index]
def test_custom_sort_five_high_straight ( ):
# Test that five high straights are compared correctly.
pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
pokerhand = PokerHand('2C 4S AS 3D 5C' )
expected = True
expected_card_values = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def test_euler_project ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
answer = 0
script_dir = os.path.abspath(os.path.dirname(__file__ ) )
poker_hands = os.path.join(script_dir , 'poker_hands.txt' )
with open(poker_hands ) as file_hand:
for line in file_hand:
player_hand = line[:1_4].strip()
opponent_hand = line[1_5:].strip()
player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6
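# Note (added): 376 is the published answer to Project Euler problem 54
# (the number of hands won by player one in poker_hands.txt).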
| 657 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_ ( state_dict ):
ignore_keys = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys ( s_dict ):
keys = list(s_dict.keys() )
for key in keys:
new_key = key
for k, v in WHISPER_MAPPING.items():
if k in key:
new_key = new_key.replace(k , v )
print(F'{key} -> {new_key}' )
s_dict[new_key] = s_dict.pop(key )
return s_dict
def make_linear_from_emb ( emb ):
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
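# Note (added): the returned layer shares its weight tensor with the token
# embedding, so the decoder's output projection is tied to embed_tokens
# (the usual weight-tying trick for seq2seq language models).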
def _download ( url : str , root : str = '.' ) -> bytes:
# root defaults to the working directory (an added assumption) so the
# single-argument call below remains valid.
os.makedirs(root , exist_ok=True )
filename = os.path.basename(url )
expected_sha256 = url.split('/' )[-2]
download_target = os.path.join(root , filename )
if os.path.exists(download_target ) and not os.path.isfile(download_target ):
raise RuntimeError(F'{download_target} exists and is not a regular file' )
if os.path.isfile(download_target ):
model_bytes = open(download_target , 'rb' ).read()
if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=8_0 , unit='iB' , unit_scale=True , unit_divisor=1_0_2_4 ) as loop:
while True:
buffer = source.read(8_1_9_2 )
if not buffer:
break
output.write(buffer )
loop.update(len(buffer ) )
model_bytes = open(download_target , 'rb' ).read()
if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
raise RuntimeError(
'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
def convert_openai_whisper_to_tfms ( checkpoint_path , pytorch_dump_folder_path ):
if ".pt" not in checkpoint_path:
original_checkpoint = _download(_MODELS[checkpoint_path] )
else:
original_checkpoint = torch.load(checkpoint_path , map_location='cpu' )
dimensions = original_checkpoint['dims']
state_dict = original_checkpoint['model_state_dict']
proj_out_weights = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(state_dict )
rename_keys(state_dict )
tie_embeds = True
ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
config = WhisperConfig(
vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
model = WhisperForConditionalGeneration(config )
missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
if len(missing ) > 0 and not set(missing ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F' but all the following weights are missing {missing}' )
if tie_embeds:
model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
model.proj_out.weight.data = proj_out_weights
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case : Optional[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 700 |
def hexagonal_numbers ( length : int ):
if length <= 0 or not isinstance(length , int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(length )]
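# The n-th hexagonal number is n * (2n - 1); a quick check of the first values
# (added example, the sequence here starts at n = 0):
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]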
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 0 |
def binary_exponentiation ( a : int , n : int , mod : int ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
else:
b = binary_exponentiation(a , n // 2 , mod )
return (b * b) % mod
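# Worked example (added): 3^4 mod 5 is computed as ((3^2 mod 5)^2) mod 5,
# i.e. (4 * 4) % 5 == 1, matching 81 % 5.
assert binary_exponentiation(3, 4, 5) == 1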
# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 701 |
def calc_profit ( profit : list , weight : list , max_weight : int ):
if len(profit ) != len(weight ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must be greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
profit_by_weight = [p / w for p, w in zip(profit , weight )]
# Creating a copy of the list and sorting profit/weight in ascending order
sorted_profit_by_weight = sorted(profit_by_weight )
# declaring useful variables
length = len(sorted_profit_by_weight )
limit = 0
gain = 0
i = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
index = profit_by_weight.index(biggest_profit_by_weight )
profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
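# Worked example (added, matching the upstream doctest): ratios are 1/3, 2/4
# and 3/5; all three items fit within the 15 kg limit, so the greedy gain is
# the full profit 1 + 2 + 3 = 6.
assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6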
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 0 |