"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
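
# Illustrative check (added; not part of the original module): both generators
# produce identical triangles, e.g. for num_rows = 5:
#   generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)
#   == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
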
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCamelCase :
def __init__( self , UpperCAmelCase , ):
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = '''gelu'''
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFEsmModel(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ = model(UpperCAmelCase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(UpperCAmelCase )
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=UpperCAmelCase )
lowerCamelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
lowerCamelCase_ = model(UpperCAmelCase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(UpperCAmelCase , encoder_hidden_states=UpperCAmelCase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFEsmForMaskedLM(config=UpperCAmelCase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: Optional[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
a__: Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
a__: str = False
a__: Tuple = False
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(UpperCAmelCase , UpperCAmelCase )
for k, v in name.items():
assert isinstance(UpperCAmelCase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(UpperCAmelCase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCAmelCase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(UpperCAmelCase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
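
# Note (added): assuming the standard transformers repository layout, this test
# module would typically be run from the repo root with pytest, e.g.:
#   python -m pytest tests/models/esm/test_modeling_tf_esm.py
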
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowerCamelCase_ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
lowerCamelCase_ = f"{src_lang}-{tgt_lang}"
lowerCamelCase_ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
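
# Example invocation (added for illustration; the script filename below is an
# assumption, not part of the original file):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub
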
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return base * power(lowerCAmelCase__ ,(exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
A_ = int(input("""Enter the base: """).strip())
A_ = int(input("""Enter the exponent: """).strip())
A_ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A_ = 1 / result
print(f"{base} to the power of {exponent} is {result}")
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.exp(lowerCAmelCase__ )
lowerCamelCase_ = torch.sum(lowerCAmelCase__ ,dim=1 ) # sum of exp(x_i)
lowerCamelCase_ = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(lowerCAmelCase__ ) - B / A
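
# Derivation note (added): with p_i = exp(x_i) / A and A = sum_j exp(x_j), the
# softmax entropy is H = -sum_i p_i * log(p_i)
#                      = log(A) - (sum_i x_i * exp(x_i)) / A
#                      = log(A) - B / A,
# which is exactly the quantity returned above.
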

class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway exit: pool one intermediate layer's output and classify it directly."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al., 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite were added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
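# (Added note, based on my reading of the CoNLL-2012 format.) In the coreference
# column used by the docstring example, "(116)" marks a single-token mention of
# entity 116; "(116" would open a multi-token mention that a later "116)" closes,
# and "-" means no mention starts or ends on that token.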
| 29 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = 'SpeechT5FeatureExtractor'
a__: List[str] = 'SpeechT5Tokenizer'
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''audio''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''text''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''text_target''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''audio_target''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''sampling_rate''' , UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
elif text is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
else:
lowerCamelCase_ = None
if audio_target is not None:
lowerCamelCase_ = self.feature_extractor(audio_target=UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_values''']
elif text_target is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''input_values''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''input_ids''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''labels''' , UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowerCamelCase_ = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
elif input_ids is not None:
lowerCamelCase_ = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
else:
lowerCamelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase , UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCamelCase_ = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = targets['''input_ids''']
else:
lowerCamelCase_ = self.feature_extractor.feature_size
lowerCamelCase_ = self.feature_extractor.num_mel_bins
lowerCamelCase_ = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = feature_size_hack
lowerCamelCase_ = targets['''input_values''']
else:
lowerCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
lowerCamelCase_ = labels
lowerCamelCase_ = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowerCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
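# A minimal usage sketch (added; the checkpoint name and `waveform` are assumptions,
# not part of this file). `text` is routed to the tokenizer and `audio_target` to the
# feature extractor, whose `input_values` become `labels`, mirroring `__call__` above:
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", audio_target=waveform,
#                      sampling_rate=16_000, return_tensors="pt")
#   inputs["input_ids"], inputs["labels"]  # tokenized text, target spectrogram features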
| 29 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
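    # Added sanity checks: 6 and 28 are perfect numbers, so the proper-divisor sum
    # computed above equals the number itself.
    assert lowercase(6) == 6  # 1 + 2 + 3
    assert lowercase(28) == 28  # 1 + 2 + 4 + 7 + 14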
| 29 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
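# A worked example of the encoding method above (added), assuming a feature built as
# TranslationVariableLanguages(languages=["en", "fr"]):
#   {"en": "the cat", "fr": ["le chat", "la chatte"]}
# is flattened to (lang, text) pairs, sorted, and returned as
#   {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}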
| 29 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
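# (Added sketch.) The `_LazyModule` above defers the heavy torch/flax imports until an
# attribute is first accessed. A minimal stand-alone version of the same idea via
# module-level __getattr__ (PEP 562); the names below are illustrative, not the real API:
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, names in _import_structure.items():
#           if name in names:
#               submodule = importlib.import_module(f".{module_name}", __name__)
#               return getattr(submodule, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")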
| 29 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase ):
a__: Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__: Dict = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__: Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__: List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
lowerCamelCase_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
lowerCamelCase_ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] )
lowerCamelCase_ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
lowerCamelCase_ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
# Legacy behavior
lowerCamelCase_ = text_classifier('''This is great !''' , return_all_scores=UpperCAmelCase )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
lowerCamelCase_ = text_classifier('''This is great !''' , return_all_scores=UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] )
lowerCamelCase_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}],
] , )
lowerCamelCase_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
{'''label''': '''LABEL_0''', '''score''': 0.5_0_4},
] , )
@require_torch
def UpperCAmelCase__ ( self ):
import torch
lowerCamelCase_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
lowerCamelCase_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@require_tf
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
lowerCamelCase_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] )
@slow
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = pipeline('''text-classification''' )
lowerCamelCase_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCamelCase_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCamelCase_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
@slow
@require_tf
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = pipeline('''text-classification''' , framework='''tf''' )
lowerCamelCase_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
lowerCamelCase_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
lowerCamelCase_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TextClassificationPipeline(model=UpperCAmelCase , tokenizer=UpperCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowerCamelCase_ = '''HuggingFace is in'''
lowerCamelCase_ = text_classifier(UpperCAmelCase )
self.assertEqual(nested_simplify(UpperCAmelCase ) , [{'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
lowerCamelCase_ = ['''HuggingFace is in ''', '''Paris is in France''']
lowerCamelCase_ = text_classifier(UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}, {'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowerCamelCase_ = text_classifier(UpperCAmelCase , top_k=UpperCAmelCase )
lowerCamelCase_ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [[{'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}] * N, [{'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}] * N] , )
lowerCamelCase_ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
lowerCamelCase_ = text_classifier(UpperCAmelCase )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowerCamelCase_ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(UpperCAmelCase ):
text_classifier(UpperCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowerCamelCase_ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{'''label''': ANY(UpperCAmelCase ), '''score''': ANY(UpperCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
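    # (Added note) `top_k=None` is the modern way to get scores for every label, while
    # `return_all_scores=True/False` is the legacy flag exercised above; in both cases
    # the returned labels are validated against the model config's id-to-label mapping.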
| 29 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
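# (Added cross-check; my reading, not part of the original solution.) The code above
# matches Project Euler 234, "semidivisible numbers": with lps(n) the largest prime p
# such that p*p <= n and ups(n) the smallest prime p such that p*p >= n, n is
# semidivisible when exactly one of lps(n), ups(n) divides n, and the answer is the
# sum of all semidivisible n <= limit. A self-contained brute force for small limits:
def brute_force(limit: int = 1_000) -> int:
    bound = math.isqrt(limit) + 100
    primes = [p for p in range(2, bound) if all(p % d for d in range(2, math.isqrt(p) + 1))]
    total = 0
    for n in range(4, limit + 1):
        lps = max(p for p in primes if p * p <= n)  # largest prime <= sqrt(n)
        ups = min(p for p in primes if p * p >= n)  # smallest prime >= sqrt(n)
        if (n % lps == 0) != (n % ups == 0):  # exactly one divides n
            total += n
    return total
# If that reading is right, brute_force(1_000) agrees with the optimized function above
# (the problem statement quotes 34825 for limit = 1000).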
| 29 | 1 |
"""simple docstring"""
import random
def lowercase ( lowerCAmelCase__ ):
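    # Miller-Rabin probabilistic primality test (descriptive comments added):
    # write num - 1 as 2**t * s with s odd, then for 5 random bases a check
    # a**s mod num and its repeated squarings before declaring num "probably prime".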
lowerCamelCase_ = num - 1
lowerCamelCase_ = 0
while s % 2 == 0:
lowerCamelCase_ = s // 2
t += 1
for _ in range(5 ):
lowerCamelCase_ = random.randrange(2 ,num - 1 )
lowerCamelCase_ = pow(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if v != 1:
lowerCamelCase_ = 0
while v != (num - 1):
if i == t - 1:
return False
else:
lowerCamelCase_ = i + 1
lowerCamelCase_ = (v**2) % num
return True
def lowercase ( lowerCAmelCase__ ):
if num < 2:
return False
lowerCamelCase_ = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ = 1_024 ):
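    # (Added comment) Rejection sampling: draw uniform `keysize`-bit integers until one
    # passes the primality check above. By the prime number theorem this needs roughly
    # keysize * ln(2) draws on average (about 700 for 1024 bits).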
while True:
lowerCamelCase_ = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(lowerCAmelCase__ ):
return num
if __name__ == "__main__":
A_ = generate_large_prime()
print("""Prime number:""", num)
print("""is_prime_low_num:""", is_prime_low_num(num))
| 29 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
assert result == expected
| 29 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
lowerCamelCase_ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
lowerCamelCase_ = [sys.executable] + distributed_args
execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
| 29 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. One way to examine this issue is through a theory called the power law, which states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system, with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
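# Hand-checking the 0.5 from the docstring example (added; assumes jiwer's standard
# word-level alignment, so the exact substitution/insertion split may differ):
#   pair 1: "this is the prediction" vs "this is the reference"
#           S=1 ("reference" -> "prediction"), D=0, I=0, N=4  -> 1 error
#   pair 2: "there is an other sample" vs "there is another one"
#           S=2 ("another" -> "an", "one" -> "sample"), I=1 ("other"), N=4 -> 3 errors
#   WER = (1 + 3) / (4 + 4) = 0.5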
| 29 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
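    # Added example: word order is reversed, characters within each word are not.
    assert lowercase("I love Python") == "Python love I"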
| 29 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
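# A self-contained numpy sketch of the nucleus (top-p) filtering the tests
# above check (illustrative, not the Flax implementation): keep the smallest
# set of tokens whose cumulative probability reaches top_p, mask the rest to
# -inf. Guarded so the test module has no import-time side effects.
if __name__ == "__main__":
    def top_p_filter(logits, top_p=0.75):
        order = np.argsort(logits)[::-1]  # token indices, most likely first
        probs = np.exp(logits - logits.max())
        probs /= probs.sum()
        cum = np.cumsum(probs[order])
        keep = order[: np.searchsorted(cum, top_p) + 1]  # smallest prefix with mass >= top_p
        filtered = np.full(logits.shape, -np.inf)
        filtered[keep] = logits[keep]
        return filtered

    print(top_p_filter(np.log(np.array([0.3, 0.1, 0.1, 0.5]))))  # keeps the 0.5 and 0.3 tokens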
| 29 | 1 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
A_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
A_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
A_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = None
# source code of `config_class`
lowerCamelCase_ = inspect.getsource(lowerCAmelCase__ )
lowerCamelCase_ = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowerCamelCase_ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase_ = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
lowerCamelCase_ = ckpt_name
break
return checkpoint
def lowercase ( ):
lowerCamelCase_ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCamelCase_ = get_checkpoint_from_config_class(lowerCAmelCase__ )
lowerCamelCase_ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowerCamelCase_ = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
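# Quick illustration of what _re_checkpoint extracts from a docstring fragment
# (the fragment is made up; kept as comments so this CI script stays
# side-effect free):
#
#     doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
#     _re_checkpoint.findall(doc)
#     # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]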
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
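# A hedged usage sketch for the helpers above; the feature spec and temporary
# path are illustrative, not part of the original benchmark file.
if __name__ == "__main__":
    import tempfile

    demo_features = datasets.Features(
        {"text": datasets.Value("string"), "label": datasets.Value("int32")}
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        ds = generate_example_dataset(f"{tmp_dir}/bench.arrow", demo_features, num_examples=10)
        print(ds)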
| 29 | 1 |
"""simple docstring"""
def hamming_distance(string_a: str, string_b: str) -> int:
    """
    Number of positions at which two equal-length strings differ.

    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
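# For integer inputs the same metric is the popcount of the XOR; a minimal
# companion sketch, not part of the original file:
def hamming_distance_int(a: int, b: int) -> int:
    return bin(a ^ b).count("1")
# hamming_distance_int(0b1011, 0b1001) == 1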
| 29 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __lowerCamelCase :
def __init__( self , UpperCAmelCase , UpperCAmelCase=16 , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=14 , UpperCAmelCase=10 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=[1, 2, 3, 4, 5] , UpperCAmelCase=25 , UpperCAmelCase=5 , ):
lowerCamelCase_ = d_model
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = prediction_length
lowerCamelCase_ = context_length
lowerCamelCase_ = cardinality
lowerCamelCase_ = num_time_features
lowerCamelCase_ = lags_sequence
lowerCamelCase_ = embedding_dimension
lowerCamelCase_ = is_training
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = context_length
lowerCamelCase_ = prediction_length + label_length
lowerCamelCase_ = label_length
lowerCamelCase_ = moving_average
lowerCamelCase_ = autocorrelation_factor
def UpperCAmelCase__ ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = config.context_length + max(config.lags_sequence )
lowerCamelCase_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ = floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = self.prepare_autoformer_inputs_dict(UpperCAmelCase )
return config, inputs_dict
def UpperCAmelCase__ ( self ):
lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = AutoformerModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
lowerCamelCase_ = model(**UpperCAmelCase )
lowerCamelCase_ = outputs.encoder_last_hidden_state
lowerCamelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = AutoformerEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model.create_network_inputs(**UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ = encoder(inputs_embeds=UpperCAmelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
lowerCamelCase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = AutoformerDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase_ = decoder(
trend=UpperCAmelCase , inputs_embeds=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a__: Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
a__: List[Any] = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
a__: str = False
a__: int = False
a__: Dict = False
a__: List[str] = False
a__: Optional[int] = False
a__: Optional[Any] = False
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoformerModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertEqual(info['''missing_keys'''] , [] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
def UpperCAmelCase__ ( self ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCAmelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(UpperCAmelCase )] , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
lowerCamelCase_ = getattr(self.model_tester , '''seq_length''' , UpperCAmelCase )
lowerCamelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , UpperCAmelCase )
lowerCamelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , UpperCAmelCase )
lowerCamelCase_ = getattr(self.model_tester , '''d_model''' , UpperCAmelCase )
lowerCamelCase_ = getattr(self.model_tester , '''num_attention_heads''' , UpperCAmelCase )
lowerCamelCase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# decoder attentions
lowerCamelCase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase_ = outputs.cross_attentions
self.assertIsInstance(UpperCAmelCase , (list, tuple) )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + 2 , len(UpperCAmelCase ) )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase__ ( self ):
super().test_retain_grad_hidden_states_attentions()
def lowercase ( lowerCAmelCase__="train-batch.pt" ):
lowerCamelCase_ = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' ,filename=lowerCAmelCase__ ,repo_type='''dataset''' )
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location=lowerCAmelCase__ )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
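# Back-of-envelope companion to prepare_autoformer_inputs_dict above: the
# encoder consumes context_length + max(lags_sequence) past steps. The values
# below are assumed for illustration, not read from the tester.
if __name__ == "__main__":
    context_length = 14  # assumed illustrative value
    lags_sequence = [1, 2, 3, 4, 5]  # assumed illustrative value
    print(context_length + max(lags_sequence))  # 19 past time steps required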
| 29 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
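# A hypothetical invocation of this converter; the script name and paths are
# placeholders, not taken from the original file:
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /ckpts/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --mbart_50 --finetuned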
| 29 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Dict = DebertaTokenizer
a__: int = True
a__: List[str] = DebertaTokenizerFast
def UpperCAmelCase__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowerCamelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase_ = {'''unk_token''': '''[UNK]'''}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ ( self ):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase_ = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase_ = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase )
lowerCamelCase_ = [tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase_ = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase_ = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , UpperCAmelCase )
for expected, decoded in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __lowerCamelCase ( lowerCAmelCase ):
def __get__( self , UpperCAmelCase , UpperCAmelCase=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowerCamelCase_ = '''__cached_''' + self.fget.__name__
lowerCamelCase_ = getattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if cached is None:
lowerCamelCase_ = self.fget(UpperCAmelCase )
setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return cached
def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def lowercase ( lowerCAmelCase__ ):
if is_torch_fx_proxy(lowerCAmelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase__ ,torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase__ ,tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase__ ,(jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase__ ,np.ndarray )
def lowercase ( lowerCAmelCase__ ):
return isinstance(lowerCAmelCase__ ,np.ndarray )
def lowercase ( lowerCAmelCase__ ):
return _is_numpy(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import torch
return isinstance(lowerCAmelCase__ ,torch.Tensor )
def lowercase ( lowerCAmelCase__ ):
return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import torch
return isinstance(lowerCAmelCase__ ,torch.device )
def lowercase ( lowerCAmelCase__ ):
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import torch
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
else:
return False
return isinstance(lowerCAmelCase__ ,torch.dtype )
def lowercase ( lowerCAmelCase__ ):
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import tensorflow as tf
return isinstance(lowerCAmelCase__ ,tf.Tensor )
def lowercase ( lowerCAmelCase__ ):
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase__ ,'''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(lowerCAmelCase__ )
return type(lowerCAmelCase__ ) == tf.Tensor
def lowercase ( lowerCAmelCase__ ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase__ ,jnp.ndarray )
def lowercase ( lowerCAmelCase__ ):
return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
if isinstance(lowerCAmelCase__ ,(dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ ,(list, tuple) ):
return [to_py_obj(lowerCAmelCase__ ) for o in obj]
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ ).tolist()
elif isinstance(lowerCAmelCase__ ,(np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowercase ( lowerCAmelCase__ ):
if isinstance(lowerCAmelCase__ ,(dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase__ ,(list, tuple) ):
return np.array(lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase__ ):
return np.asarray(lowerCAmelCase__ )
else:
return obj
class __lowerCamelCase ( lowerCAmelCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = fields(self )
# Safety and consistency checks
if not len(UpperCAmelCase ):
raise ValueError(f"{self.__class__.__name__} has no fields." )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
lowerCamelCase_ = getattr(self , class_fields[0].name )
lowerCamelCase_ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = first_field.items()
lowerCamelCase_ = True
else:
try:
lowerCamelCase_ = iter(UpperCAmelCase )
lowerCamelCase_ = True
except TypeError:
lowerCamelCase_ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCAmelCase ):
if (
not isinstance(UpperCAmelCase , (list, tuple) )
or not len(UpperCAmelCase ) == 2
or not isinstance(element[0] , UpperCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCamelCase_ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCamelCase_ = element[1]
elif first_field is not None:
lowerCamelCase_ = first_field
else:
for field in class_fields:
lowerCamelCase_ = getattr(self , field.name )
if v is not None:
lowerCamelCase_ = v
def __delitem__( self , *UpperCAmelCase , **UpperCAmelCase ):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
def __getitem__( self , UpperCAmelCase ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCAmelCase , UpperCAmelCase )
super().__setattr__(UpperCAmelCase , UpperCAmelCase )
def __setitem__( self , UpperCAmelCase , UpperCAmelCase ):
# Will raise a KeyException if needed
super().__setitem__(UpperCAmelCase , UpperCAmelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
return tuple(self[k] for k in self.keys() )
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase ):
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase ):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" )
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = 'longest'
a__: Dict = 'max_length'
a__: Optional[int] = 'do_not_pad'
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = 'pt'
a__: Union[str, Any] = 'tf'
a__: int = 'np'
a__: List[str] = 'jax'
class __lowerCamelCase :
def __init__( self , UpperCAmelCase ):
lowerCamelCase_ = context_managers
lowerCamelCase_ = ExitStack()
def __enter__( self ):
for context_manager in self.context_managers:
self.stack.enter_context(UpperCAmelCase )
def __exit__( self , *UpperCAmelCase , **UpperCAmelCase ):
self.stack.__exit__(*UpperCAmelCase , **UpperCAmelCase )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = infer_framework(lowerCAmelCase__ )
if framework == "tf":
lowerCamelCase_ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase_ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase_ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = model_class.__name__
lowerCamelCase_ = infer_framework(lowerCAmelCase__ )
if framework == "tf":
lowerCamelCase_ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase_ = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase_ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ = "" ,lowerCAmelCase__ = "." ):
def _flatten_dict(lowerCAmelCase__ ,lowerCAmelCase__="" ,lowerCAmelCase__="." ):
for k, v in d.items():
lowerCamelCase_ = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
if v and isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
yield from flatten_dict(lowerCAmelCase__ ,lowerCAmelCase__ ,delimiter=lowerCAmelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) )
@contextmanager
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ ,axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ ,perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ ,axes=lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for transpose: {type(lowerCAmelCase__ )}." )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if is_numpy_array(lowerCAmelCase__ ):
return np.reshape(lowerCAmelCase__ ,lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.reshape(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase__ ,lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.reshape(lowerCAmelCase__ ,lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for reshape: {type(lowerCAmelCase__ )}." )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
if is_numpy_array(lowerCAmelCase__ ):
return np.squeeze(lowerCAmelCase__ ,axis=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase__ ,axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.squeeze(lowerCAmelCase__ ,axis=lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for squeeze: {type(lowerCAmelCase__ )}." )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if is_numpy_array(lowerCAmelCase__ ):
return np.expand_dims(lowerCAmelCase__ ,lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.unsqueeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase__ ,axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.expand_dims(lowerCAmelCase__ ,axis=lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for expand_dims: {type(lowerCAmelCase__ )}." )
def lowercase ( lowerCAmelCase__ ):
if is_numpy_array(lowerCAmelCase__ ):
return np.size(lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.size(lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return array.size
else:
raise ValueError(f"Type not supported for expand_dims: {type(lowerCAmelCase__ )}." )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for key, value in auto_map.items():
if isinstance(lowerCAmelCase__ ,(tuple, list) ):
lowerCamelCase_ = [f"{repo_id}--{v}" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCamelCase_ = f"{repo_id}--{value}"
return auto_map
def lowercase ( lowerCAmelCase__ ):
for base_class in inspect.getmro(lowerCAmelCase__ ):
lowerCamelCase_ = base_class.__module__
lowerCamelCase_ = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"Could not infer framework from class {model_class}." )
| 29 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
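# A worked, hypothetical sketch of the "fixed_small" posterior variance that
# the _get_variance assertions above exercise. For a linear beta schedule,
#   var(t) = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
# with alpha_bar_{-1} taken as 1, so var(0) == 0 and var(999) is close to beta_end.
def _ddpm_fixed_small_variance(t ,num_train_timesteps=1000 ,beta_start=0.0_0_0_1 ,beta_end=0.0_2 ):
    import numpy as np
    betas = np.linspace(beta_start ,beta_end ,num_train_timesteps )
    alphas_cumprod = np.cumprod(1.0 - betas )
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - alpha_bar_prev ) / (1.0 - alphas_cumprod[t] )
# _ddpm_fixed_small_variance(0) == 0.0 and _ddpm_fixed_small_variance(999) ~ 0.0_2,
# matching the 0.0 and 0.0_2 checks above (and 0.0_0_9_7_9 at t=487 likewise).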
| 29 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A_ = """src/diffusers"""
# Matches is_xxx_available()
A_ = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
A_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
A_ = """
{0} = None
"""
A_ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A_ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _re_backend.findall(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) == 0:
return None
return "_and_".join(lowerCAmelCase__ )
def lowercase ( ):
with open(os.path.join(lowerCAmelCase__ ,'''__init__.py''' ) ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCamelCase_ = f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ = 0
lowerCamelCase_ = {}
# Go through the end of the file
while line_index < len(lowerCAmelCase__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ = []
# Until we unindent, add backend objects to the list
while line_index < len(lowerCAmelCase__ ) and len(lines[line_index] ) > 1:
lowerCamelCase_ = lines[line_index]
lowerCamelCase_ = _re_single_line_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowerCAmelCase__ ) > 0:
lowerCamelCase_ = objects
else:
line_index += 1
return backend_specific_objects
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if name.isupper():
return DUMMY_CONSTANT.format(lowerCAmelCase__ )
elif name.islower():
return DUMMY_FUNCTION.format(lowerCAmelCase__ ,lowerCAmelCase__ )
else:
return DUMMY_CLASS.format(lowerCAmelCase__ ,lowerCAmelCase__ )
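# Hypothetical examples of the casing rule in create_dummy_object above:
#   "FLAX_WEIGHTS_NAME" (all upper) -> the constant template ("FLAX_WEIGHTS_NAME = None")
#   "load_model"        (all lower) -> the function template (a stub calling requires_backends)
#   "UNet2DModel"       (mixed)     -> the class template (a DummyObject subclass)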
def lowercase ( lowerCAmelCase__=None ):
if backend_specific_objects is None:
lowerCamelCase_ = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ = '''[''' + ''', '''.join(f"\"{b}\"" for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowerCAmelCase__ ,lowerCAmelCase__ ) for o in objects] )
lowerCamelCase_ = dummy_file
return dummy_files
def lowercase ( lowerCAmelCase__=False ):
lowerCamelCase_ = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ = {'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''utils''' )
lowerCamelCase_ = {
backend: os.path.join(lowerCAmelCase__ ,f"dummy_{short_names.get(lowerCAmelCase__ ,lowerCAmelCase__ )}_objects.py" )
for backend in dummy_files.keys()
}
lowerCamelCase_ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCamelCase_ = f.read()
else:
lowerCamelCase_ = ''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"Updating diffusers.utils.dummy_{short_names.get(lowerCAmelCase__ ,lowerCAmelCase__ )}_objects.py as the main "
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
f"diffusers.utils.dummy_{short_names.get(lowerCAmelCase__ ,lowerCAmelCase__ )}_objects.py. Run `make fix-copies` "
'''to fix this.''' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A_ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
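# Minimal, hypothetical sketch of the recursion in to_dict() above: any value
# exposing its own to_dict() (e.g. a GenerationConfig) is flattened to a plain
# dict so the serialized training arguments stay JSON-compatible.
def _flatten_args_demo(d ):
    return {k: (v.to_dict() if hasattr(v ,'''to_dict''' ) else v) for k, v in d.items()}
# json.dumps(_flatten_args_demo(raw)) then succeeds where json.dumps(raw) would raise.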
| 29 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCAmelCase ):
a__: Union[str, Any] = 'encoder-decoder'
a__: List[Any] = True
def __init__( self , **UpperCAmelCase ):
super().__init__(**UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCamelCase_ = kwargs.pop('''encoder''' )
lowerCamelCase_ = encoder_config.pop('''model_type''' )
lowerCamelCase_ = kwargs.pop('''decoder''' )
lowerCamelCase_ = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase_ = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = True
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
lowerCamelCase_ = True
lowerCamelCase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.encoder.to_dict()
lowerCamelCase_ = self.decoder.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output
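# Hypothetical shape of the dict produced by to_dict() above: the encoder and
# decoder configs are nested whole, each keeping its own model_type so that
# AutoConfig.for_model can rebuild both sides on load, e.g.
#   {"model_type": "encoder-decoder",
#    "encoder": {"model_type": "bert", ...},
#    "decoder": {"model_type": "bert", "is_decoder": True, "add_cross_attention": True, ...}}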
| 29 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
            lowerCamelCase_ = f"Can't load config for {pretrained_model_name_or_path}"
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
    raise Exception('''tensors are all good''' )  # intentional: halts the run once the comparison passes
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
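# Hypothetical example of the cache-key scheme above: the filename is
# sha256(url).hexdigest() (64 hex chars), extended with "." + sha256(etag).hexdigest()
# when an ETag is known, and with a ".h5" suffix for TF weight files.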
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
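# Hypothetical spot-check of the batching generator above: it yields
# consecutive slices of size `batch`, the last one short if the input does
# not divide evenly, e.g. for five images and batch=2:
#   [images[0:2], images[2:4], images[4:5]]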
| 29 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
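# Hypothetical end-to-end trace of the renaming rules above:
#   "layers.0.blocks.1.modulation.f.weight"
#     -> "encoder.layers.0.blocks.1.modulation.f.weight"            ("layers" prefixed)
#     -> "encoder.stages.0.blocks.1.modulation.f.weight"            ("encoder.layers" -> "encoder.stages")
#     -> "encoder.stages.0.layers.1.modulation.f.weight"            ("blocks" -> "layers")
#     -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
#     -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"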
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
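        # Hypothetical example of the flattening above: the raw TabFact string
        #   "col1#col2\na#b\nc#d"
        # becomes a 2x2 pandas DataFrame with columns ["col1", "col2"].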
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __lowerCamelCase ( lowerCAmelCase ):
a__: jnp.ndarray
@flax_register_to_config
class __lowerCamelCase ( nn.Module , lowerCAmelCase , lowerCAmelCase ):
a__: int = 32
a__: int = 4
a__: int = 4
a__: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a__: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
a__: Union[bool, Tuple[bool]] = False
a__: Tuple[int] = (320, 640, 1280, 1280)
a__: int = 2
a__: Union[int, Tuple[int]] = 8
a__: Optional[Union[int, Tuple[int]]] = None
a__: int = 1280
a__: float = 0.0
a__: bool = False
a__: jnp.dtype = jnp.floataa
a__: bool = True
a__: int = 0
a__: bool = False
def UpperCAmelCase__ ( self , UpperCAmelCase ):
# init input tensors
lowerCamelCase_ = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase_ = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
lowerCamelCase_ = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase_ , lowerCamelCase_ = jax.random.split(UpperCAmelCase )
lowerCamelCase_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.block_out_channels
lowerCamelCase_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ = self.num_attention_heads or self.attention_head_dim
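# Note: a non-None num_attention_heads raises above, so this fallback always
# resolves to attention_head_dim (8 with the class defaults declared earlier).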
# input
lowerCamelCase_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase_ = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase_ = self.only_cross_attention
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ = []
lowerCamelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ = output_channel
lowerCamelCase_ = block_out_channels[i]
lowerCamelCase_ = i == len(UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase_ = FlaxDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase )
lowerCamelCase_ = down_blocks
# mid
lowerCamelCase_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCamelCase_ = []
lowerCamelCase_ = list(reversed(UpperCAmelCase ) )
lowerCamelCase_ = list(reversed(UpperCAmelCase ) )
lowerCamelCase_ = list(reversed(UpperCAmelCase ) )
lowerCamelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCamelCase_ = output_channel
lowerCamelCase_ = reversed_block_out_channels[i]
lowerCamelCase_ = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase ) - 1 )]
lowerCamelCase_ = i == len(UpperCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCamelCase_ = FlaxCrossAttnUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase_ = FlaxUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(UpperCAmelCase )
lowerCamelCase_ = output_channel
lowerCamelCase_ = up_blocks
# out
lowerCamelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , UpperCAmelCase = False , ):
# 1. time
if not isinstance(UpperCAmelCase , jnp.ndarray ):
lowerCamelCase_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase_ = jnp.expand_dims(UpperCAmelCase , 0 )
lowerCamelCase_ = self.time_proj(UpperCAmelCase )
lowerCamelCase_ = self.time_embedding(UpperCAmelCase )
# 2. pre-process
lowerCamelCase_ = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
lowerCamelCase_ = self.conv_in(UpperCAmelCase )
# 3. down
lowerCamelCase_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
else:
lowerCamelCase_ , lowerCamelCase_ = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCamelCase_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
UpperCAmelCase , UpperCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ = new_down_block_res_samples
# 4. mid
lowerCamelCase_ = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCamelCase_ = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCamelCase_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = up_block(
UpperCAmelCase , temb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train , )
else:
lowerCamelCase_ = up_block(UpperCAmelCase , temb=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train )
# 6. post-process
lowerCamelCase_ = self.conv_norm_out(UpperCAmelCase )
lowerCamelCase_ = nn.silu(UpperCAmelCase )
lowerCamelCase_ = self.conv_out(UpperCAmelCase )
lowerCamelCase_ = jnp.transpose(UpperCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=UpperCAmelCase )
| 29 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
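# Temperature divides the logits before softmax: T=0.5 doubles them (sharper
# distribution) while T=1.3 shrinks them (flatter distribution), which the
# assertions below verify.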
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
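# Worked check: row 0 sorted probs [0.5, 0.3, 0.1, 0.1] cumulate to [0.5, 0.8, 0.9, 1.0],
# so top_p=0.8 keeps 0.5 and 0.3; row 1 sorted probs [0.3, 0.3, 0.25, 0.15] cumulate to
# [0.3, 0.6, 0.85, 1.0], so the minimal set reaching 0.8 keeps three tokens.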
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( lowerCAmelCase__ ):
for param in module.parameters():
lowerCamelCase_ = False
def lowercase ( ):
lowerCamelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCamelCase_ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = plt.imshow(lowerCAmelCase__ )
fig.axes.get_xaxis().set_visible(lowerCAmelCase__ )
fig.axes.get_yaxis().set_visible(lowerCAmelCase__ )
plt.show()
def lowercase ( ):
lowerCamelCase_ = datetime.now()
lowerCamelCase_ = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 29 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
A_ = None
A_ = {
"""7B""": 1_1008,
"""13B""": 1_3824,
"""30B""": 1_7920,
"""65B""": 2_2016,
"""70B""": 2_8672,
}
A_ = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ,lowerCAmelCase__=256 ):
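# Worked example for the formula below (assuming the well-known 7B dim n = 4096):
# int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256 gives
# 256 * 43 = 11008, matching the "7B" intermediate size in the table above.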
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowercase ( lowerCAmelCase__ ):
with open(lowerCAmelCase__ ,'''r''' ) as f:
return json.load(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
with open(lowerCAmelCase__ ,'''w''' ) as f:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''tmp''' )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = read_json(os.path.join(lowerCAmelCase__ ,'''params.json''' ) )
lowerCamelCase_ = NUM_SHARDS[model_size]
lowerCamelCase_ = params['''n_layers''']
lowerCamelCase_ = params['''n_heads''']
lowerCamelCase_ = n_heads // num_shards
lowerCamelCase_ = params['''dim''']
lowerCamelCase_ = dim // n_heads
lowerCamelCase_ = 10_000.0
lowerCamelCase_ = 1.0 / (base ** (torch.arange(0 ,lowerCAmelCase__ ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCamelCase_ = params['''n_kv_heads'''] # for GQA / MQA
lowerCamelCase_ = n_heads_per_shard // num_key_value_heads
lowerCamelCase_ = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCamelCase_ = n_heads
lowerCamelCase_ = n_heads_per_shard
lowerCamelCase_ = dim
# permute for sliced rotary
def permute(lowerCAmelCase__ ,lowerCAmelCase__=n_heads ,lowerCAmelCase__=dim ,lowerCAmelCase__=dim ):
return w.view(lowerCAmelCase__ ,dima // n_heads // 2 ,2 ,lowerCAmelCase__ ).transpose(1 ,2 ).reshape(lowerCAmelCase__ ,lowerCAmelCase__ )
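# Shape note: for a 7B checkpoint (dim = 4096, n_heads = 32) this computes
# w.view(32, 64, 2, 4096).transpose(1, 2).reshape(4096, 4096), regrouping the
# interleaved rotary (real, imag) pairs into the half-split layout HF expects.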
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCamelCase_ = torch.load(os.path.join(lowerCAmelCase__ ,'''consolidated.00.pth''' ) ,map_location='''cpu''' )
else:
# Sharded
lowerCamelCase_ = [
torch.load(os.path.join(lowerCAmelCase__ ,f"consolidated.{i:02d}.pth" ) ,map_location='''cpu''' )
for i in range(lowerCAmelCase__ )
]
lowerCamelCase_ = 0
lowerCamelCase_ = {'''weight_map''': {}}
for layer_i in range(lowerCAmelCase__ ):
lowerCamelCase_ = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
lowerCamelCase_ = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCamelCase_ = {
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
lowerCamelCase_ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
for i in range(lowerCAmelCase__ )
] ,dim=0 ,).reshape(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
for i in range(lowerCAmelCase__ )
] ,dim=0 ,).reshape(lowerCAmelCase__ ,lowerCAmelCase__ ) ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,)
lowerCamelCase_ = torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
for i in range(lowerCAmelCase__ )
] ,dim=0 ,).reshape(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(lowerCAmelCase__ )] ,dim=1 )
lowerCamelCase_ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(lowerCAmelCase__ )] ,dim=0 )
lowerCamelCase_ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(lowerCAmelCase__ )] ,dim=1 )
lowerCamelCase_ = torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(lowerCAmelCase__ )] ,dim=0 )
lowerCamelCase_ = inv_freq
for k, v in state_dict.items():
lowerCamelCase_ = filename
param_count += v.numel()
torch.save(lowerCAmelCase__ ,os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCamelCase_ = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
lowerCamelCase_ = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
lowerCamelCase_ = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(lowerCAmelCase__ )] ,dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(lowerCAmelCase__ )] ,dim=0 ),
}
for k, v in state_dict.items():
lowerCamelCase_ = filename
param_count += v.numel()
torch.save(lowerCAmelCase__ ,os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) )
# Write configs
lowerCamelCase_ = {'''total_size''': param_count * 2}
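# param_count * 2: weights are serialized in float16, i.e. 2 bytes per parameter.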
write_json(lowerCAmelCase__ ,os.path.join(lowerCAmelCase__ ,'''pytorch_model.bin.index.json''' ) )
lowerCamelCase_ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
lowerCamelCase_ = params['''multiple_of'''] if '''multiple_of''' in params else 256
lowerCamelCase_ = LlamaConfig(
hidden_size=lowerCAmelCase__ ,intermediate_size=compute_intermediate_size(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ,num_attention_heads=params['''n_heads'''] ,num_hidden_layers=params['''n_layers'''] ,rms_norm_eps=params['''norm_eps'''] ,num_key_value_heads=lowerCAmelCase__ ,)
config.save_pretrained(lowerCAmelCase__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
lowerCamelCase_ = LlamaForCausalLM.from_pretrained(lowerCAmelCase__ ,torch_dtype=torch.floataa ,low_cpu_mem_usage=lowerCAmelCase__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(lowerCAmelCase__ ,safe_serialization=lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
# Initialize the tokenizer based on the `spm` model
lowerCamelCase_ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
lowerCamelCase_ = tokenizer_class(lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
def lowercase ( ):
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' ,help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' ,)
parser.add_argument(
'''--model_size''' ,choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] ,)
parser.add_argument(
'''--output_dir''' ,help='''Location to write HF model and tokenizer''' ,)
parser.add_argument('''--safe_serialization''' ,type=lowerCAmelCase__ ,help='''Whether or not to save using `safetensors`.''' )
lowerCamelCase_ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
lowerCamelCase_ = os.path.join(args.input_dir ,'''tokenizer.model''' )
write_tokenizer(args.output_dir ,lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 29 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
stooge(lowerCAmelCase__ ,0 ,len(lowerCAmelCase__ ) - 1 )
return arr
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
lowerCamelCase_ , lowerCamelCase_ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCamelCase_ = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowerCAmelCase__ ,lowerCAmelCase__ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(lowerCAmelCase__ ,i + t ,(lowerCAmelCase__) ) # the third argument is the upper index h in the unobfuscated algorithm
# Recursively sort first 2/3 elements
stooge(lowerCAmelCase__ ,lowerCAmelCase__ ,(h - t) )
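# Stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, asymptotically
# slower than bubble sort; it is included for educational purposes only.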
if __name__ == "__main__":
A_ = input("""Enter numbers separated by a comma:\n""").strip()
A_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
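# RoCBert indexes each token three ways (vocab id, glyph-shape id, pronunciation id);
# the toy maps built in setUp assign all three the same indices, hence the
# identical expected id lists above.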
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
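# A minimal, self-contained sketch of the greedy longest-match-first WordPiece
# algorithm that the wordpiece tests above exercise. This is a plain-Python
# reimplementation for illustration, not the library class; the tiny vocabulary
# below mirrors the one built in the test.
def _wordpiece_tokenize(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            # continuation pieces carry the "##" prefix
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no prefix matched: the whole word maps to [UNK]
        tokens.append(cur)
        start = end
    return tokens

assert _wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _wordpiece_tokenize("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]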
| 29 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = ['pixel_values']
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = 32 , UpperCAmelCase=PILImageResampling.BILINEAR , UpperCAmelCase = True , **UpperCAmelCase , ):
lowerCamelCase_ = do_resize
lowerCamelCase_ = do_rescale
lowerCamelCase_ = size_divisor
lowerCamelCase_ = resample
super().__init__(**UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = get_image_size(UpperCAmelCase )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCamelCase_ = height // size_divisor * size_divisor
lowerCamelCase_ = width // size_divisor * size_divisor
lowerCamelCase_ = resize(UpperCAmelCase , (new_h, new_w) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
return image
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase ):
return rescale(image=UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = size_divisor if size_divisor is not None else self.size_divisor
lowerCamelCase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
lowerCamelCase_ = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(UpperCAmelCase ) for img in images]
if do_resize:
lowerCamelCase_ = [self.resize(UpperCAmelCase , size_divisor=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(UpperCAmelCase , scale=1 / 255 ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
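# A small worked example of the size rounding used in the resizing step above:
# height and width are floored to the closest multiple of `size_divisor`, never
# rounded up. Pure arithmetic, so it runs standalone.
def _round_down_to_multiple(value, size_divisor):
    return value // size_divisor * size_divisor

assert _round_down_to_multiple(480, 32) == 480  # already a multiple
assert _round_down_to_multiple(500, 32) == 480  # floored to the multiple below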
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions from the key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines ,sys_lines ,NP_only=False ,remove_nested=False ,keep_singletons=True ,min_span=False ,doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc ,key_doc_lines[doc] ,keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    sys_clusters, singletons_num = reader.get_doc_mentions(doc ,sys_doc_lines[doc] ,keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters ,keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters ,keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters ,key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters ,sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def evaluate(key_lines ,sys_lines ,metrics ,NP_only ,remove_nested ,keep_singletons ,min_span ):
    doc_coref_infos = get_coref_infos(key_lines ,sys_lines ,NP_only ,remove_nested ,keep_singletons ,min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos ,metric ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def check_gold_parse_annotation(key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if parse_col != "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
            has_gold_parse = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
        score = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
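# A worked example of the averaged CoNLL score computed in `evaluate` above: the
# mean of the MUC, B-cubed and CEAFe F1 values, reported on a 0-100 scale. The
# three F1 values below are made up for illustration.
_muc_fa, _bcub_fa, _ceafe_fa = 0.70, 0.65, 0.60
_conll_score = (_muc_fa + _bcub_fa + _ceafe_fa) / 3 * 100
assert abs(_conll_score - 65.0) < 1e-9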
| 29 | 1 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
A_ = logging.get_logger(__name__)
A_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A_ = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
A_ = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: List[str] = VOCAB_FILES_NAMES
a__: Any = PRETRAINED_VOCAB_FILES_MAP
a__: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__: Union[str, Any] = ['input_ids', 'attention_mask']
a__: List[str] = CodeGenTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
if kwargs.pop('''add_bos_token''' , UpperCAmelCase ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
'''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
'''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
''' so that the fast tokenizer works correctly.''' )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
lowerCamelCase_ = getattr(UpperCAmelCase , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**UpperCAmelCase )
lowerCamelCase_ = add_prefix_space
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.get('''is_split_into_words''' , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
lowerCamelCase_ = super().decode(
token_ids=UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , **UpperCAmelCase , )
if truncate_before_pattern is not None and len(UpperCAmelCase ) > 0:
lowerCamelCase_ = self.truncate(UpperCAmelCase , UpperCAmelCase )
return decoded_text
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
def find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = pattern.search(UpperCAmelCase , UpperCAmelCase )
return m.start() if m else -1
lowerCamelCase_ = [re.compile(UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCamelCase_ = list(re.finditer('''^print''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowerCamelCase_ = completion[: prints[1].start()]
lowerCamelCase_ = list(re.finditer('''^def''' , UpperCAmelCase , re.MULTILINE ) )
if len(UpperCAmelCase ) > 1:
lowerCamelCase_ = completion[: defs[1].start()]
lowerCamelCase_ = 0
lowerCamelCase_ = [
pos for pos in [find_re(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(UpperCAmelCase ) > 0:
return completion[: min(UpperCAmelCase )]
else:
return completion
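# A self-contained sketch of the pattern-based cut performed in `truncate`
# above: the decoded completion is cut at the earliest match of any pattern in
# `truncate_before_pattern` (the real method additionally truncates at a second
# top-level `print`/`def`). The sample completion and pattern are illustrative.
import re as _re

def _truncate_completion(completion, truncate_before_pattern):
    terminals = [_re.compile(p, _re.MULTILINE) for p in truncate_before_pattern]
    positions = [m.start() for m in (t.search(completion) for t in terminals) if m]
    return completion[: min(positions)] if positions else completion

_sample = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))\n"
assert _truncate_completion(_sample, [r"^print"]) == "def add(a, b):\n    return a + b\n\n"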
| 29 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
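# A minimal sketch of the `update` contract exercised by the tests above,
# reimplemented on a plain object so it runs standalone: known attributes are
# overwritten in place and unknown kwargs are returned untouched.
class _ConfigSketch:
    def __init__(self):
        self.max_new_tokens = 20

    def update(self, **kwargs):
        unused = {}
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
            else:
                unused[key] = value
        return unused

_cfg = _ConfigSketch()
assert _cfg.update(max_new_tokens=1024, foo="bar") == {"foo": "bar"}
assert _cfg.max_new_tokens == 1024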
| 29 | 1 |
"""simple docstring"""
def mean_absolute_deviation(nums ):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
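# A worked example of the function above: for [1, 2, 3, 4] the mean is 2.5, the
# absolute deviations are 1.5, 0.5, 0.5, 1.5, and their mean is 4.0 / 4 = 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0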
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
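# A self-contained sketch of the flattening done by the variable-languages
# encoder above: a value may be a single string or a list of strings, and the
# resulting (language, translation) pairs are sorted by language code. This is
# an illustrative reimplementation, not the class method itself.
def _flatten_translations(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": languages, "translation": translations}

assert _flatten_translations({"fr": ["le chat", "la chatte"], "en": "the cat"}) == {
    "language": ("en", "fr", "fr"),
    "translation": ("the cat", "la chatte", "le chat"),
}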
| 29 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowercase ( lowerCAmelCase__ ):
# vision encoder
if "img_encoder.pos_embed" in name:
lowerCamelCase_ = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCamelCase_ = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase_ = name.replace('''attn''' ,'''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase_ = name.replace('''proj''' ,'''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase_ = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCamelCase_ = name.replace('''norm1''' ,'''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase_ = name.replace('''norm2''' ,'''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCamelCase_ = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase_ = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCamelCase_ = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase_ = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCamelCase_ = name.replace('''ln_1''' ,'''layer_norm1''' )
if "ln_2" in name:
lowerCamelCase_ = name.replace('''ln_2''' ,'''layer_norm2''' )
if "c_fc" in name:
lowerCamelCase_ = name.replace('''c_fc''' ,'''fc1''' )
if "c_proj" in name:
lowerCamelCase_ = name.replace('''c_proj''' ,'''fc2''' )
if "text_encoder" in name:
lowerCamelCase_ = name.replace('''text_encoder''' ,'''text_model''' )
if "ln_final" in name:
lowerCamelCase_ = name.replace('''ln_final''' ,'''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase_ = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCamelCase_ = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCamelCase_ = name.replace('''text_projector.linear_hidden''' ,'''text_projection''' )
if "text_projector.linear_out" in name:
lowerCamelCase_ = name.replace('''text_projector.linear_out''' ,'''text_projection.3''' )
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ , lowerCamelCase_ = int(key_split[2] ), int(key_split[4] )
lowerCamelCase_ = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = int(key_split[3] )
lowerCamelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase_ = val.squeeze_()
else:
lowerCamelCase_ = val
return orig_state_dict
def lowercase ( ):
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__="groupvit-gcc-yfcc" ,lowerCAmelCase__=False ):
lowerCamelCase_ = GroupViTConfig()
lowerCamelCase_ = GroupViTModel(lowerCAmelCase__ ).eval()
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
lowerCamelCase_ = convert_state_dict(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = model.load_state_dict(lowerCAmelCase__ ,strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowerCamelCase_ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors='''pt''' )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f"Model name {model_name} not supported." )
assert torch.allclose(outputs.logits_per_image ,lowerCAmelCase__ ,atol=1E-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print('''Successfully saved processor and model to''' ,lowerCAmelCase__ )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ ,organization='''nielsr''' )
model.push_to_hub(lowerCAmelCase__ ,organization='''nielsr''' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
A_ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
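# Example invocation, assuming this script is saved as
# convert_groupvit_checkpoint.py (the paths below are placeholders):
#
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc
#
# Only 'groupvit-gcc-yfcc' and 'groupvit-gcc-redcaps' are accepted model names;
# any other value raises a ValueError during the verification step above.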
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
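# A minimal sketch of what the lazy module above buys: importing the package
# stays cheap because heavy submodules are only loaded on first attribute
# access. The toy stub below is illustrative, not the real _LazyModule.
import importlib

class _LazyStub:
    def __init__(self, name_to_module):
        self._name_to_module = name_to_module

    def __getattr__(self, name):
        # import the backing module only when the attribute is first requested
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

_stub = _LazyStub({"sqrt": "math"})
assert _stub.sqrt(9) == 3.0  # `math` is imported only here, on first access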
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_a: Vector, vector_b: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np(vector_a: Vector, vector_b: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_a ,vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark():
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' ,number=10_000 ,globals=globals() ,) )
benchmark()
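# A worked check for both implementations above: the distance between (1, 2, 3)
# and (4, 5, 6) is sqrt(9 + 9 + 9) = sqrt(27), roughly 5.196.
import math
assert math.isclose(float(euclidean_distance((1, 2, 3), (4, 5, 6))), math.sqrt(27))
assert math.isclose(euclidean_distance_no_np((1, 2, 3), (4, 5, 6)), math.sqrt(27))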
| 29 |
"""simple docstring"""
import math
def prime_sieve(n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit = 999_966_663_333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
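# A quick sanity check of the sieve above on a small bound: the primes below 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]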
| 29 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
A_ = """http://www.mocksite.com/file1.txt"""
A_ = """\"text\": [\"foo\", \"foo\"]"""
A_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class __lowerCamelCase :
a__: str = 200
a__: Tuple = {'Content-Length': '100'}
a__: Any = {}
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return [bytes(UpperCAmelCase , '''utf-8''' )]
def lowercase ( *lowerCAmelCase__ ,**lowerCAmelCase__ ):
return MockResponse()
@pytest.mark.parametrize('''urls_type''' ,[str, list, dict] )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
import requests
monkeypatch.setattr(lowerCAmelCase__ ,'''request''' ,lowerCAmelCase__ )
lowerCamelCase_ = URL
if issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = url
elif issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [url]
elif issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = {'''train''': url}
lowerCamelCase_ = '''dummy'''
lowerCamelCase_ = '''downloads'''
lowerCamelCase_ = tmp_path
lowerCamelCase_ = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,use_etag=lowerCAmelCase__ ,)
lowerCamelCase_ = DownloadManager(dataset_name=lowerCAmelCase__ ,download_config=lowerCAmelCase__ )
lowerCamelCase_ = dl_manager.download(lowerCAmelCase__ )
lowerCamelCase_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [downloaded_paths]
lowerCamelCase_ = [urls]
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
assert "train" in downloaded_paths.keys()
lowerCamelCase_ = downloaded_paths.values()
lowerCamelCase_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCamelCase_ = Path(lowerCAmelCase__ )
lowerCamelCase_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCamelCase_ = downloaded_path.read_text()
assert content == CONTENT
lowerCamelCase_ = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
lowerCamelCase_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' ,[str, list, dict] )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = filename
elif issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [filename]
elif issubclass(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = {'''train''': filename}
lowerCamelCase_ = '''dummy'''
lowerCamelCase_ = xz_file.parent
lowerCamelCase_ = '''extracted'''
lowerCamelCase_ = DownloadConfig(
cache_dir=lowerCAmelCase__ ,use_etag=lowerCAmelCase__ ,)
lowerCamelCase_ = DownloadManager(dataset_name=lowerCAmelCase__ ,download_config=lowerCAmelCase__ )
lowerCamelCase_ = dl_manager.extract(lowerCAmelCase__ )
lowerCamelCase_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [extracted_paths]
lowerCamelCase_ = [paths]
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
assert "train" in extracted_paths.keys()
lowerCamelCase_ = extracted_paths.values()
lowerCamelCase_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCamelCase_ = Path(lowerCAmelCase__ )
lowerCamelCase_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase__ ,etag=lowerCAmelCase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCamelCase_ = extracted_path.read_text()
lowerCamelCase_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(lowerCAmelCase__ ,start=1 ):
lowerCamelCase_ = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' ,['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = request.getfixturevalue(lowerCAmelCase__ )
lowerCamelCase_ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) ,start=1 ):
_test_jsonl(lowerCAmelCase__ ,lowerCAmelCase__ )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' ,['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = request.getfixturevalue(lowerCAmelCase__ )
lowerCamelCase_ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) ,start=1 ):
_test_jsonl(lowerCAmelCase__ ,lowerCAmelCase__ )
assert num_tar == 1
assert num_jsonl == 2
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase__ ) ,start=1 ):
assert os.path.basename(lowerCAmelCase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
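# A minimal usage sketch of the DownloadManager API exercised by the tests
# above; the URL is a placeholder and the calls are shown as comments so
# nothing is fetched at import time.
#
#   dl_manager = DownloadManager(dataset_name="dummy", download_config=DownloadConfig(cache_dir="downloads"))
#   local_path = dl_manager.download("http://www.mocksite.com/file1.txt")
#   extracted_path = dl_manager.extract(local_path)
#   for inner_path, file_obj in dl_manager.iter_archive(extracted_path):
#       ...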
| 29 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source ,target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 29 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 29 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=0 ):
return sorted(lowerCAmelCase__ ,key=lambda x : x[column] )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=float('''inf''' ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 ,lowerCAmelCase__ ):
lowerCamelCase_ = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
lowerCamelCase_ = current_dis
return min_dis
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=float('''inf''' ) ):
for i in range(min(6 ,points_counts - 1 ) ,lowerCAmelCase__ ):
for j in range(max(0 ,i - 6 ) ,lowerCAmelCase__ ):
lowerCamelCase_ = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
lowerCamelCase_ = current_dis
return min_dis
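# Divide and conquer: split at the median x-coordinate, recurse on both halves, then
# re-check the vertical strip around the split line. Inside the strip each point is
# compared only against a bounded window of neighbours (the 6-wide window above),
# which keeps the merge step near-linear and the whole search roughly O(n log n).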
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase__ ,lowerCAmelCase__ )
# recursion
lowerCamelCase_ = points_counts // 2
lowerCamelCase_ = closest_pair_of_points_sqr(
lowerCAmelCase__ ,points_sorted_on_y[:mid] ,lowerCAmelCase__ )
lowerCamelCase_ = closest_pair_of_points_sqr(
lowerCAmelCase__ ,points_sorted_on_y[mid:] ,points_counts - mid )
lowerCamelCase_ = min(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase__ )
lowerCamelCase_ = dis_between_closest_in_strip(
lowerCAmelCase__ ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ )
return min(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = column_based_sort(lowerCAmelCase__ ,column=0 )
lowerCamelCase_ = column_based_sort(lowerCAmelCase__ ,column=1 )
return closest_pair_of_points_sqr(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) ** 0.5
if __name__ == "__main__":
A_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
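"""Reverse the order of the words in input_str.
A doctest is included so the doctest.testmod() call below has a case to verify:
>>> lowercase("I love Python")
'Python love I'
"""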
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
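# Uniform logits assign every token probability 1/length, a neutral baseline that
# makes each warper's effect easy to assert on.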
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Optional[Any] = PhobertTokenizer
a__: Optional[Any] = False
def UpperCAmelCase__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
lowerCamelCase_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowerCamelCase_ = ['''#version: 0.2''', '''l à</w>''']
lowerCamelCase_ = {'''unk_token''': '''<unk>'''}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = '''Tôi là VinAI Research'''
lowerCamelCase_ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = '''Tôi là VinAI Research'''
lowerCamelCase_ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
print(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
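# Timing decorator: runs the wrapped function once and returns the elapsed wall-clock
# time in seconds instead of the function's own result.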
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
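# Minimal usage sketch for the dataset writer above (hypothetical feature spec and
# output path, left as a comment because identifiers in this dump are rewritten):
# features = datasets.Features({"text": datasets.Value("string")})
# dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)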
| 29 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(UpperCAmelCase )
def __call__( self , UpperCAmelCase , **UpperCAmelCase ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return {}, {}, {}
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = load_image(UpperCAmelCase )
lowerCamelCase_ = image.size
lowerCamelCase_ = self.image_processor(images=UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = self.model(**UpperCAmelCase )
return model_outputs
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = model_outputs.predicted_depth
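# The model's depth map is low-resolution; upsample it back to the input image's
# resolution (interpolate expects (height, width), hence the [::-1] on (width, height)).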
lowerCamelCase_ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=UpperCAmelCase )
lowerCamelCase_ = prediction.squeeze().cpu().numpy()
lowerCamelCase_ = (output * 255 / np.max(UpperCAmelCase )).astype('''uint8''' )
lowerCamelCase_ = Image.fromarray(UpperCAmelCase )
lowerCamelCase_ = {}
lowerCamelCase_ = predicted_depth
lowerCamelCase_ = depth
return output_dict
| 29 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
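# Stale policy encoded below: an issue at least 30 days old whose latest comment is the
# bot's and that has been quiet for over 7 days is closed; one quiet for over 23 days
# receives a stale warning first. Issues carrying an exempt label skip both branches.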
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a__: Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__: str = False
a__: Tuple = False
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
lowerCamelCase_ = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class in get_values(UpperCAmelCase ):
lowerCamelCase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = embedding_size
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFMobileBertModel(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(UpperCAmelCase )
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFMobileBertForMaskedLM(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFMobileBertForPreTraining(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFMobileBertForSequenceClassification(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = TFMobileBertForMultipleChoice(config=UpperCAmelCase )
lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFMobileBertForTokenClassification(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase )
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
lowerCamelCase_ = TFMobileBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(UpperCAmelCase )[0]
lowerCamelCase_ = [1, 6, 3_0522]
self.assertEqual(output.shape , UpperCAmelCase )
lowerCamelCase_ = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 29 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
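# Builds the LM head as a bias-free linear layer whose weight is the token-embedding
# matrix, i.e. the usual input/output embedding weight tying.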
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCamelCase ( lowerCAmelCase ):
a__: int = 'ClapFeatureExtractor'
a__: Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''sampling_rate''' , UpperCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
lowerCamelCase_ = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if audios is not None:
lowerCamelCase_ = self.feature_extractor(
UpperCAmelCase , sampling_rate=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and audios is not None:
encoding['''input_features'''] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer.model_input_names
lowerCamelCase_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
A_ = logging.getLogger(__name__)
def lowercase ( ):
lowerCamelCase_ = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' ,type=lowerCAmelCase__ ,default='''data/dump.txt''' ,help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' ,type=lowerCAmelCase__ ,default='''bert''' ,choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' ,type=lowerCAmelCase__ ,default='''bert-base-uncased''' ,help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' ,type=lowerCAmelCase__ ,default='''data/dump''' ,help='''The dump file prefix.''' )
lowerCamelCase_ = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
lowerCamelCase_ = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowerCamelCase_ = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowerCamelCase_ = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowerCamelCase_ = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path ,'''r''' ,encoding='''utf8''' ) as fp:
lowerCamelCase_ = fp.readlines()
logger.info('''Start encoding''' )
logger.info(f"{len(lowerCAmelCase__ )} examples to process." )
lowerCamelCase_ = []
lowerCamelCase_ = 0
lowerCamelCase_ = 10_000
lowerCamelCase_ = time.time()
for text in data:
lowerCamelCase_ = f"{bos} {text.strip()} {sep}"
lowerCamelCase_ = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
rslt.append(lowerCAmelCase__ )
iter += 1
if iter % interval == 0:
lowerCamelCase_ = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
lowerCamelCase_ = time.time()
logger.info('''Finished binarization''' )
logger.info(f"{len(lowerCAmelCase__ )} examples processed." )
lowerCamelCase_ = f"{args.dump_file}.{args.tokenizer_name}.pickle"
lowerCamelCase_ = tokenizer.vocab_size
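# Token ids fit into unsigned 16-bit ints whenever the vocabulary is smaller than
# 2**16, roughly halving the pickled dump; otherwise fall back to 32-bit ints.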
if vocab_size < (1 << 16):
lowerCamelCase_ = [np.uintaa(lowerCAmelCase__ ) for d in rslt]
else:
lowerCamelCase_ = [np.intaa(lowerCAmelCase__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(lowerCAmelCase__ ,'''wb''' ) as handle:
pickle.dump(rslt_ ,lowerCAmelCase__ ,protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 29 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
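# With custom timesteps, the scheduler's "previous" timestep is simply the next entry
# in the user-supplied descending list, and -1 once the list is exhausted.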
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
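# Defaults in the config below appear to mirror the EfficientFormer-L1 variant
# (an assumption based on the l1-300 checkpoint referenced above).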
class __lowerCamelCase ( lowerCAmelCase ):
a__: str = 'efficientformer'
def __init__( self , UpperCAmelCase = [3, 2, 6, 4] , UpperCAmelCase = [48, 96, 224, 448] , UpperCAmelCase = [True, True, True, True] , UpperCAmelCase = 448 , UpperCAmelCase = 32 , UpperCAmelCase = 4 , UpperCAmelCase = 7 , UpperCAmelCase = 5 , UpperCAmelCase = 8 , UpperCAmelCase = 4 , UpperCAmelCase = 0.0 , UpperCAmelCase = 16 , UpperCAmelCase = 3 , UpperCAmelCase = 3 , UpperCAmelCase = 3 , UpperCAmelCase = 2 , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 1 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = 1e-5 , UpperCAmelCase = "gelu" , UpperCAmelCase = 0.0_2 , UpperCAmelCase = 1e-1_2 , UpperCAmelCase = 224 , UpperCAmelCase = 1e-0_5 , **UpperCAmelCase , ):
super().__init__(**UpperCAmelCase )
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = depths
lowerCamelCase_ = mlp_expansion_ratio
lowerCamelCase_ = downsamples
lowerCamelCase_ = dim
lowerCamelCase_ = key_dim
lowerCamelCase_ = attention_ratio
lowerCamelCase_ = resolution
lowerCamelCase_ = pool_size
lowerCamelCase_ = downsample_patch_size
lowerCamelCase_ = downsample_stride
lowerCamelCase_ = downsample_pad
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = num_metaad_blocks
lowerCamelCase_ = distillation
lowerCamelCase_ = use_layer_scale
lowerCamelCase_ = layer_scale_init_value
lowerCamelCase_ = image_size
lowerCamelCase_ = batch_norm_eps
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
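        # GenerationConfig values are not JSON-serializable as-is; flatten them to plain dicts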
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
| 29 | 1 |
"""simple docstring"""
import os
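# Project Euler problem 11: find the greatest product of four adjacent numbers
# (right, down, or diagonally) in the 20x20 grid stored in grid.txt.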
def lowercase ( ):
with open(os.path.dirname(lowerCAmelCase__ ) + '''/grid.txt''' ) as f:
lowerCamelCase_ = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCAmelCase__ ) for x in f.readline().split()] )
lowerCamelCase_ = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase_ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase_ = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase_ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase_ = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase_ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase_ = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
lowerCamelCase_ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase_ = temp
return maximum
if __name__ == "__main__":
print(solution())
| 29 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
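# Load a pickled Detectron-style checkpoint and convert its numpy weights to torch tensors.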
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
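# Minimal usage sketch for the dict-to-attributes Config wrapper above (the class
# name is obfuscated here; `Config` is an assumption, as are the keys used):
#   cfg = Config({"model": {"hidden_size": 768}})
#   cfg.model.hidden_size  # nested dicts become attribute paths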
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
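    # raising even on success appears deliberate: this helper halts one-off debugging runs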
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
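# Note: the download above resumes by sending `Range: bytes=<resume_size>-`; an
# HTTP 416 means nothing is left to fetch (assumes the server honors Range requests).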
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
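# Illustrative result: "<sha256(url)>.<sha256(etag)>" when an ETag is available,
# with ".h5" preserved for HDF5 files.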
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
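        # Detectron checkpoints carry `running_var` but no `num_batches_tracked`;
        # add a zero tensor for each so PyTorch BatchNorm layers load cleanly.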
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
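# e.g. with a batch of 2, [img0, img1, img2, img3] yields [img0, img1] then [img2, img3]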
| 29 | 1 |
"""simple docstring"""
A_ = 8.3144598
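# Universal gas constant R in J/(mol*K); the function below computes v_rms = sqrt(3*R*T/M) with M in kg/mol.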
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
A_ = 300
A_ = 28
A_ = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 29 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
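        # Table text arrives flattened as e.g. "col1#col2\n1#2\n3#4": rows split on
        # newlines, cells on '#', with the first row used as the header.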
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
A_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCamelCase ( lowerCAmelCase ):
a__: int = ['pixel_values']
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , **UpperCAmelCase , ):
super().__init__(**UpperCAmelCase )
lowerCamelCase_ = size if size is not None else {'''shortest_edge''': 224}
lowerCamelCase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase , param_name='''crop_size''' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ = do_convert_rgb
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ):
lowerCamelCase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCamelCase_ = get_resize_output_image_size(UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
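        # e.g. size={"shortest_edge": 224} maps a 480x640 input to roughly 224x299, keeping the aspect ratio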
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
lowerCamelCase_ = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ):
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(UpperCAmelCase , param_name='''size''' , default_to_square=UpperCAmelCase )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(UpperCAmelCase , param_name='''crop_size''' , default_to_square=UpperCAmelCase )
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ = [convert_to_rgb(UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 29 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
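        # convert the mean per-token cross-entropy back into a summed score comparable to the reference below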
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowercase ( lowerCAmelCase__ ):
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
A_ = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class __lowerCamelCase ( lowerCAmelCase ):
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
lowerCamelCase_ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=UpperCAmelCase , required=UpperCAmelCase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=UpperCAmelCase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=UpperCAmelCase , default=UpperCAmelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=UpperCAmelCase )
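        # Registered subcommand is invoked as, for example (illustrative paths):
        #   transformers-cli convert --model_type bert --tf_checkpoint ./model.ckpt \
        #       --config ./bert_config.json --pytorch_dump_output ./pytorch_model.bin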
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , ):
lowerCamelCase_ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f"Loading model {model_type}" )
lowerCamelCase_ = model_type
lowerCamelCase_ = tf_checkpoint
lowerCamelCase_ = pytorch_dump_output
lowerCamelCase_ = config
lowerCamelCase_ = finetuning_task_name
def UpperCAmelCase__ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
if "ckpt" in self._tf_checkpoint.lower():
lowerCamelCase_ = self._tf_checkpoint
lowerCamelCase_ = ''''''
else:
lowerCamelCase_ = self._tf_checkpoint
lowerCamelCase_ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase , self._config , self._pytorch_dump_output , UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]''' )
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self, path_or_paths, split=None, features=None, cache_dir=None,
        keep_in_memory=False, streaming=False, num_proc=None, **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
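# Minimal usage sketch (illustrative only; assumes a local "data.txt" and the
# de-obfuscated names used above):
#
#     reader = TextDatasetReader("data.txt", split=NamedSplit("train"))
#     dataset = reader.read()  # map-style Dataset; pass streaming=True for an IterableDataset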
| 29 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
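    # Note: the helper above yields a single random 30x400 RGB image as
    # channel-first numpy data and converts it to PIL by moving the channel
    # axis last with np.moveaxis.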
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph, source_vertex):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
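# Expected output of the demo above (worked by hand from the adjacency list,
# with BFS rooted at "G"):
#     G->C->A->B->D
#     G
# and the final call raises ValueError("No path from vertex: G to vertex: Foo").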
| 29 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
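# Example invocation (a sketch; the script filename is hypothetical):
#
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny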
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
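        # Worked example of the offset handling above (a sketch, reading off
        # the alignment table): spm assigns "," id 3, so the token-to-id
        # conversion below returns 3 + self.fairseq_offset == 4, the position
        # "," holds in the original fairseq vocab.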
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self):
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang):
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang):
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang):
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
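# Minimal usage sketch (illustrative; assumes the stock
# facebook/mbart-large-en-ro checkpoint is available):
#
#     tok = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok("UN Chief Says There Is No Military Solution in Syria")
#     # input_ids end with [..., eos_token_id, lang_code_to_id["en_XX"]]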
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}",)

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
return score
| 29 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase="utf8" , UpperCAmelCase="[UNK]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="[PAD]" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , UpperCAmelCase = None , **UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , vocab_file=UpperCAmelCase , encoding=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = sentencepiece_model_ckpt
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCamelCase_ = self.load_vocab(filepath=UpperCAmelCase )
else:
lowerCamelCase_ = {self.sp_model.id_to_piece(UpperCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
lowerCamelCase_ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self , UpperCAmelCase ):
if text is None:
return None
lowerCamelCase_ = self.tokenize(UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ = '''''', []
for i, ch in enumerate(UpperCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
lowerCamelCase_ = self.SP_CHAR_MAPPING.get(UpperCAmelCase )
else:
lowerCamelCase_ = unicodedata.normalize('''NFKC''' , UpperCAmelCase )
if self.is_whitespace(UpperCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCAmelCase ) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = normalized_text, [], 0
if self.do_lower_case:
lowerCamelCase_ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCamelCase_ = token[1:]
lowerCamelCase_ = text[offset:].index(UpperCAmelCase ) + offset
lowerCamelCase_ = start + len(UpperCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCamelCase_ = end
return token_mapping
@property
def UpperCAmelCase__ ( self ):
return len(self.vocab )
def UpperCAmelCase__ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , UpperCAmelCase ):
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
    def UpperCAmelCase__ ( self , text ):
        # Map characters through SP_CHAR_MAPPING, falling back to the character itself.
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
 def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
  if self.sp_model_kwargs.get("enable_sampling") is True:
   enable_sampling = True
  if self.sp_model_kwargs.get("alpha") is not None:
   alpha = self.sp_model_kwargs.get("alpha")
  if self.sp_model_kwargs.get("nbest_size") is not None:
   nbest_size = self.sp_model_kwargs.get("nbest_size")
  if not enable_sampling:
   pieces = self.sp_model.EncodeAsPieces(text)
  else:
   pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
  new_pieces = []
  for pi, piece in enumerate(pieces):
   if piece == SPIECE_UNDERLINE:
    if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
     new_pieces.append(piece)
     continue
    else:
     continue
   lst_i = 0
   for i, chunk in enumerate(piece):
    if chunk == SPIECE_UNDERLINE:
     continue
    if self.is_ch_char(chunk) or self.is_punct(chunk):
     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
      new_pieces.append(piece[lst_i:i])
     new_pieces.append(chunk)
     lst_i = i + 1
    elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
      new_pieces.append(piece[lst_i:i])
     lst_i = i
    elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
      new_pieces.append(piece[lst_i:i])
     lst_i = i
   if len(piece) > lst_i:
    new_pieces.append(piece[lst_i:])
  return new_pieces
 def convert_tokens_to_string(self, tokens):
  out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
  return out_string
 def convert_ids_to_string(self, ids):
  tokens = self.convert_ids_to_tokens(ids)
  out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
  return out_string
 def _convert_token_to_id(self, token):
  return self.vocab.get(token, self.vocab.get(self.unk_token))
 def _convert_id_to_token(self, index):
  return self.reverse_vocab.get(index, self.unk_token)
 def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
  if token_ids_1 is None:
   return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
  _cls = [self.cls_token_id]
  _sep = [self.sep_token_id]
  return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
 def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
  if offset_mapping_1 is None:
   return [(0, 0)] + offset_mapping_0 + [(0, 0)]
  return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
 def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
  if already_has_special_tokens:
   if token_ids_1 is not None:
    raise ValueError(
     "You should not supply a second sequence if the provided sequence of "
     "ids is already formatted with special tokens for the model."
    )
   return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
  if token_ids_1 is not None:
   return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
  return [1] + ([0] * len(token_ids_0)) + [1]
 def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
  # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
  if token_ids_1 is None:
   # [CLS] X [SEP]
   return (len(token_ids_0) + 2) * [0]
  # [CLS] A [SEP] [SEP] B [SEP]
  return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
 def is_ch_char(self, char):
  if "\u4e00" <= char <= "\u9fff":
   return True
  return False
 def is_alpha(self, char):
  if ("a" <= char <= "z") or ("A" <= char <= "Z"):
   return True
  return False
 def is_punct(self, char):
  if char in ",;:.?!~,;:。?!《》【】":
   return True
  return False
 def is_whitespace(self, char):
  if char == " " or char == "\t" or char == "\n" or char == "\r":
   return True
  if len(char) == 1:
   cat = unicodedata.category(char)
   if cat == "Zs":
    return True
  return False
 def load_vocab(self, filepath):
  token_to_idx = {}
  with io.open(filepath, "r", encoding="utf-8") as f:
   for index, line in enumerate(f):
    token = line.rstrip("\n")
    token_to_idx[token] = int(index)
  return token_to_idx
 def save_vocabulary(self, save_directory, filename_prefix=None):
  index = 0
  if os.path.isdir(save_directory):
   vocab_file = os.path.join(
    save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
  else:
   vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
  with open(vocab_file, "w", encoding="utf-8") as writer:
   for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
    if index != token_index:
     logger.warning(
      f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
      " Please check that the vocabulary is not corrupted!")
     index = token_index
    writer.write(token + "\n")
    index += 1
  tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
  with open(tokenizer_model_file, "wb") as fi:
   content_spiece_model = self.sp_model.serialized_model_proto()
   fi.write(content_spiece_model)
  return (vocab_file,)
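# A minimal, self-contained sketch (helper name is illustrative, not part of this file) of the
# token_type_id scheme implemented by `create_token_type_ids_from_sequences` above:
# a pair of sequences is laid out as [CLS] A [SEP] [SEP] B [SEP].
def pair_token_type_ids(ids_a, ids_b=None):
 if ids_b is None:
  return (len(ids_a) + 2) * [0]  # [CLS] A [SEP]
 return [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)  # [CLS] A [SEP] [SEP] B [SEP]
assert pair_token_type_ids([7, 8], [9]) == [0, 0, 0, 1, 1, 1, 1]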
| 29 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
 @parameterized.expand([(None,), ("foo.json",)])
 def test_save_load_config(self, config_name):
  config = GenerationConfig(
   do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]])
  with tempfile.TemporaryDirectory() as tmp_dir:
   config.save_pretrained(tmp_dir, config_name=config_name)
   loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
  # Checks parameters that were specified
  self.assertEqual(loaded_config.do_sample, True)
  self.assertEqual(loaded_config.temperature, 0.7)
  self.assertEqual(loaded_config.length_penalty, 1.0)
  self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
  # Checks parameters that were not specified (defaults)
  self.assertEqual(loaded_config.top_k, 50)
  self.assertEqual(loaded_config.max_length, 20)
  self.assertEqual(loaded_config.max_time, None)
 def test_from_model_config(self):
  model_config = AutoConfig.from_pretrained("gpt2")
  generation_config_from_model = GenerationConfig.from_model_config(model_config)
  default_generation_config = GenerationConfig()
  # The generation config has loaded a few non-default parameters from the model config
  self.assertNotEqual(generation_config_from_model, default_generation_config)
  # One of those parameters is eos_token_id -- check if it matches
  self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
  self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
 def test_update(self):
  generation_config = GenerationConfig()
  update_kwargs = {
   "max_new_tokens": 1024,
   "foo": "bar",
  }
  update_kwargs_copy = copy.deepcopy(update_kwargs)
  unused_kwargs = generation_config.update(**update_kwargs)
  # update_kwargs was not modified (no side effects)
  self.assertEqual(update_kwargs, update_kwargs_copy)
  # update_kwargs was used to update the config on valid attributes
  self.assertEqual(generation_config.max_new_tokens, 1024)
  # `.update()` returns a dictionary of unused kwargs
  self.assertEqual(unused_kwargs, {"foo": "bar"})
 def test_initialize_new_kwargs(self):
  generation_config = GenerationConfig()
  generation_config.foo = "bar"
  with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
   generation_config.save_pretrained(tmp_dir)
   new_config = GenerationConfig.from_pretrained(tmp_dir)
  # update_kwargs was used to update the config on valid attributes
  self.assertEqual(new_config.foo, "bar")
  generation_config = GenerationConfig.from_model_config(new_config)
  assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
 def test_kwarg_init(self):
  default_config = GenerationConfig()
  self.assertEqual(default_config.temperature, 1.0)
  self.assertEqual(default_config.do_sample, False)
  self.assertEqual(default_config.num_beams, 1)
  config = GenerationConfig(
   do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]])
  self.assertEqual(config.temperature, 0.7)
  self.assertEqual(config.do_sample, True)
  self.assertEqual(config.num_beams, 1)
  with tempfile.TemporaryDirectory() as tmp_dir:
   config.save_pretrained(tmp_dir)
   loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
  self.assertEqual(loaded_config.temperature, 1.0)
  self.assertEqual(loaded_config.do_sample, True)
  self.assertEqual(loaded_config.num_beams, 1)  # default value
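# A hedged, standalone sketch of the save/load round trip the tests above exercise
# (real `transformers` API; the directory path is illustrative):
#
#   from transformers import GenerationConfig
#   cfg = GenerationConfig(do_sample=True, temperature=0.7)
#   cfg.save_pretrained("/tmp/gen-cfg")
#   loaded = GenerationConfig.from_pretrained("/tmp/gen-cfg", temperature=1.0)  # kwarg overrides the saved value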
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
 @classmethod
 def setUpClass(cls):
  cls._token = TOKEN
  HfFolder.save_token(TOKEN)
 @classmethod
 def tearDownClass(cls):
  try:
   delete_repo(token=cls._token, repo_id="test-generation-config")
  except HTTPError:
   pass
  try:
   delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
  except HTTPError:
   pass
 def test_push_to_hub(self):
  config = GenerationConfig(
   do_sample=True, temperature=0.7, length_penalty=1.0)
  config.push_to_hub("test-generation-config", use_auth_token=self._token)
  new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
  for k, v in config.to_dict().items():
   if k != "transformers_version":
    self.assertEqual(v, getattr(new_config, k))
  # Reset repo
  delete_repo(token=self._token, repo_id="test-generation-config")
  # Push to hub via save_pretrained
  with tempfile.TemporaryDirectory() as tmp_dir:
   config.save_pretrained(
    tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
  new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
  for k, v in config.to_dict().items():
   if k != "transformers_version":
    self.assertEqual(v, getattr(new_config, k))
 def test_push_to_hub_in_organization(self):
  config = GenerationConfig(
   do_sample=True, temperature=0.7, length_penalty=1.0)
  config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
  new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
  for k, v in config.to_dict().items():
   if k != "transformers_version":
    self.assertEqual(v, getattr(new_config, k))
  # Reset repo
  delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
  # Push to hub via save_pretrained
  with tempfile.TemporaryDirectory() as tmp_dir:
   config.save_pretrained(
    tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
  new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
  for k, v in config.to_dict().items():
   if k != "transformers_version":
    self.assertEqual(v, getattr(new_config, k))
| 29 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
 model_type: str = field(
  default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
 data_dir: str = field(
  default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
 max_seq_length: int = field(
  default=128,
  metadata={
   "help": (
    "The maximum total input sequence length after tokenization. Sequences longer "
    "than this will be truncated, sequences shorter will be padded."
   )
  },
 )
 doc_stride: int = field(
  default=128,
  metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
 )
 max_query_length: int = field(
  default=64,
  metadata={
   "help": (
    "The maximum number of tokens for the question. Questions longer than this will "
    "be truncated to this length."
   )
  },
 )
 max_answer_length: int = field(
  default=30,
  metadata={
   "help": (
    "The maximum length of an answer that can be generated. This is needed because the start "
    "and end predictions are not conditioned on one another."
   )
  },
 )
 overwrite_cache: bool = field(
  default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
 version_2_with_negative: bool = field(
  default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
 null_score_diff_threshold: float = field(
  default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
 n_best_size: int = field(
  default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
 lang_id: int = field(
  default=0,
  metadata={
   "help": (
    "language id of input for language-specific xlm models (see"
    " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
   )
  },
 )
 threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
 train = "train"
 dev = "dev"
class SquadDataset(Dataset):
 args: SquadDataTrainingArguments
 features: List[SquadFeatures]
 mode: Split
 is_language_sensitive: bool
 def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt"):
  self.args = args
  self.is_language_sensitive = is_language_sensitive
  self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
  if isinstance(mode, str):
   try:
    mode = Split[mode]
   except KeyError:
    raise KeyError("mode is not a valid split name")
  self.mode = mode
  # Load data features from cache or dataset file
  version_tag = "v2" if args.version_2_with_negative else "v1"
  cached_features_file = os.path.join(
   cache_dir if cache_dir is not None else args.data_dir,
   f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
  )
  # Make sure only the first process in distributed training processes the dataset,
  # and the others will use the cache.
  lock_path = cached_features_file + ".lock"
  with FileLock(lock_path):
   if os.path.exists(cached_features_file) and not args.overwrite_cache:
    start = time.time()
    self.old_features = torch.load(cached_features_file)
    # Legacy cache files have only features, while new cache files
    # will have dataset and examples also.
    self.features = self.old_features["features"]
    self.dataset = self.old_features.get("dataset", None)
    self.examples = self.old_features.get("examples", None)
    logger.info(
     f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
    if self.dataset is None or self.examples is None:
     logger.warning(
      f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
      " future run")
   else:
    if mode == Split.dev:
     self.examples = self.processor.get_dev_examples(args.data_dir)
    else:
     self.examples = self.processor.get_train_examples(args.data_dir)
    self.features, self.dataset = squad_convert_examples_to_features(
     examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
    start = time.time()
    torch.save(
     {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
    # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
    logger.info(
     f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
 def __len__(self):
  return len(self.features)
 def __getitem__(self, i):
  # Convert to Tensors and build dataset
  feature = self.features[i]
  input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
  attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
  token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
  cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
  p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
  is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
  inputs = {
   "input_ids": input_ids,
   "attention_mask": attention_mask,
   "token_type_ids": token_type_ids,
  }
  if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
   del inputs["token_type_ids"]
  if self.args.model_type in ["xlnet", "xlm"]:
   inputs.update({"cls_index": cls_index, "p_mask": p_mask})
  if self.args.version_2_with_negative:
   inputs.update({"is_impossible": is_impossible})
  if self.is_language_sensitive:
   inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
  if self.mode == Split.train:
   start_positions = torch.tensor(feature.start_position, dtype=torch.long)
   end_positions = torch.tensor(feature.end_position, dtype=torch.long)
   inputs.update({"start_positions": start_positions, "end_positions": end_positions})
  return inputs
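# A hedged usage sketch mirroring how this dataset is typically constructed
# (checkpoint name and data_dir are illustrative):
#
#   from transformers import AutoTokenizer
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
#   inputs = train_dataset[0]  # dict of tensors, as built in __getitem__ above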
| 29 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
 languages: List[str]
 id: Optional[str] = None
 # Automatically constructed
 dtype: ClassVar[str] = "dict"
 pa_type: ClassVar[Any] = None
 _type: str = field(default="Translation", init=False, repr=False)
 def __call__(self):
  return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
 def flatten(self):
  from .features import Value
  return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
 languages: Optional[List] = None
 num_languages: Optional[int] = None
 id: Optional[str] = None
 # Automatically constructed
 dtype: ClassVar[str] = "dict"
 pa_type: ClassVar[Any] = None
 _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
 def __post_init__(self):
  self.languages = sorted(set(self.languages)) if self.languages else None
  self.num_languages = len(self.languages) if self.languages else None
 def __call__(self):
  return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
 def encode_example(self, translation_dict):
  lang_set = set(self.languages)
  if self.languages and set(translation_dict) - lang_set:
   raise ValueError(
    f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).")
  # Convert dictionary into tuples, splitting out cases where there are
  # multiple translations for a single language.
  translation_tuples = []
  for lang, text in translation_dict.items():
   if isinstance(text, str):
    translation_tuples.append((lang, text))
   else:
    translation_tuples.extend([(lang, el) for el in text])
  # Ensure translations are in ascending order by language code.
  languages, translations = zip(*sorted(translation_tuples))
  return {"language": languages, "translation": translations}
 def flatten(self):
  from .features import Sequence, Value
  return {
   "language": Sequence(Value("string")),
   "translation": Sequence(Value("string")),
  }
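# A runnable sketch (helper name is illustrative) of the flattening performed by
# `encode_example` above: a multi-valued language expands into aligned, sorted lists.
def flatten_translations(translation_dict):
 pairs = []
 for lang, text in translation_dict.items():
  if isinstance(text, str):
   pairs.append((lang, text))
  else:
   pairs.extend((lang, el) for el in text)
 languages, translations = zip(*sorted(pairs))
 return {"language": languages, "translation": translations}
assert flatten_translations({"en": "the cat", "fr": ["le chat", "la chatte"]}) == {
 "language": ("en", "fr", "fr"),
 "translation": ("the cat", "la chatte", "le chat"),
}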
| 29 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
 UP = 0
 DOWN = 1
def forceWrite(content, end=""):
 sys.stdout.write(str(content) + end)
 sys.stdout.flush()
def writeColor(content, color, end=""):
 forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor():
 forceWrite("\r")
def move_cursor(num_lines, direction):
 forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line():
 forceWrite(" " * TERMINAL_WIDTH)
 reset_cursor()
def linebreak():
 reset_cursor()
 forceWrite("-" * TERMINAL_WIDTH)
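# A hedged usage sketch of the helpers above (ANSI code 32 is green):
#
#   writeColor("done", 32, end="\n")
#   move_cursor(2, Direction.UP.name)  # move the cursor two lines up
#   clear_line()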
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
 _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
 sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
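# Illustrative note on the `_LazyModule` indirection above: an import such as
#   from transformers.models.gpt_neo import GPTNeoConfig
# stays cheap, because the heavy torch/flax submodules are only loaded on first attribute access.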
| 29 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
lowerCamelCase_ = BlipaProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
  lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = BlipaProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
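# A hedged, standalone sketch of the processor round trip these tests exercise
# (the checkpoint name is illustrative and not used by the test suite):
#
#   processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # yields 'pixel_values', 'input_ids' and 'attention_mask', as asserted above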
| 29 |
"""simple docstring"""
import math
def prime_sieve(n):
 is_prime = [True] * n
 is_prime[0] = False
 is_prime[1] = False
 is_prime[2] = True
 for i in range(3, int(n**0.5 + 1), 2):
  index = i * 2
  while index < n:
   is_prime[index] = False
   index = index + i
 primes = [2]
 for i in range(3, n, 2):
  if is_prime[i]:
   primes.append(i)
 return primes
def solution(limit=999_966_663_333):
 primes_upper_bound = math.floor(math.sqrt(limit)) + 100
 primes = prime_sieve(primes_upper_bound)
 matches_sum = 0
 prime_index = 0
 last_prime = primes[prime_index]
 while (last_prime**2) <= limit:
  next_prime = primes[prime_index + 1]
  lower_bound = last_prime**2
  upper_bound = next_prime**2
  # Get numbers divisible by lps(current)
  current = lower_bound + last_prime
  while upper_bound > current <= limit:
   matches_sum += current
   current += last_prime
  # Reset the upper_bound
  while (upper_bound - next_prime) > limit:
   upper_bound -= next_prime
  # Add the numbers divisible by ups(current)
  current = upper_bound - next_prime
  while current > lower_bound:
   matches_sum += current
   current -= next_prime
  # Remove the numbers divisible by both ups and lps
  current = 0
  while upper_bound > current <= limit:
   if current <= lower_bound:
    # Increment the current number
    current += last_prime * next_prime
    continue
   if current > limit:
    break
   # Remove twice since it was added by both ups and lps
   matches_sum -= current * 2
   # Increment the current number
   current += last_prime * next_prime
  # Setup for next pair
  last_prime = next_prime
  prime_index += 1
 return matches_sum
if __name__ == "__main__":
print(solution())
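# Context (hedged): this solves Project Euler problem 234 ("semidivisible numbers"), where
# lps(n) is the largest prime <= sqrt(n), ups(n) is the smallest prime >= sqrt(n), and a number
# is semidivisible when exactly one of the two divides it. As a quick sanity check, the problem
# statement gives solution(limit=1000) == 34825.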
| 29 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=36 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=6 , UpperCAmelCase=6 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=1000 , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = text_seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = coordinate_size
lowerCamelCase_ = shape_size
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase_ = text_seq_length
lowerCamelCase_ = (image_size // patch_size) ** 2 + 1
lowerCamelCase_ = self.text_seq_length + self.image_seq_length
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase_ = bbox[i, j, 3]
lowerCamelCase_ = bbox[i, j, 1]
lowerCamelCase_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase_ = bbox[i, j, 2]
lowerCamelCase_ = bbox[i, j, 0]
lowerCamelCase_ = t
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowerCamelCase_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = LayoutLMvaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# text + image
lowerCamelCase_ = model(UpperCAmelCase , pixel_values=UpperCAmelCase )
lowerCamelCase_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
lowerCamelCase_ = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , token_type_ids=UpperCAmelCase )
lowerCamelCase_ = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase_ = model(pixel_values=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = LayoutLMvaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = LayoutLMvaForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
 def prepare_config_and_inputs_for_common(self):
  config_and_inputs = self.prepare_config_and_inputs()
  (
   config,
   input_ids,
   bbox,
   pixel_values,
   token_type_ids,
   input_mask,
   sequence_labels,
   token_labels,
  ) = config_and_inputs
  inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
 test_pruning = False
 test_torchscript = False
 test_mismatched_shapes = False
 all_model_classes = (
  (
   LayoutLMvaModel,
   LayoutLMvaForSequenceClassification,
   LayoutLMvaForTokenClassification,
   LayoutLMvaForQuestionAnswering,
  )
  if is_torch_available()
  else ()
 )
 pipeline_model_mapping = (
  {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
  if is_torch_available()
  else {}
 )
 def is_pipeline_test_to_skip(
  self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
 ):
  # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
  # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
  # the sequence dimension of the text embedding only.
  # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
  return True
 def setUp(self):
  self.model_tester = LayoutLMvaModelTester(self)
  self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
lowerCamelCase_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
lowerCamelCase_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
elif model_class in get_values(UpperCAmelCase ):
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
elif model_class in [
*get_values(UpperCAmelCase ),
]:
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
elif model_class in [
*get_values(UpperCAmelCase ),
]:
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase , )
return inputs_dict
 def test_config(self):
  self.config_tester.run_common_tests()
 def test_model(self):
  config_and_inputs = self.model_tester.prepare_config_and_inputs()
  self.model_tester.create_and_check_model(*config_and_inputs)
 def test_model_various_embeddings(self):
  config_and_inputs = self.model_tester.prepare_config_and_inputs()
  for type in ["absolute", "relative_key", "relative_key_query"]:
   config_and_inputs[0].position_embedding_type = type
   self.model_tester.create_and_check_model(*config_and_inputs)
 def test_for_sequence_classification(self):
  config_and_inputs = self.model_tester.prepare_config_and_inputs()
  self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
 def test_for_token_classification(self):
  config_and_inputs = self.model_tester.prepare_config_and_inputs()
  self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
 def test_for_question_answering(self):
  config_and_inputs = self.model_tester.prepare_config_and_inputs()
  self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
 @slow
 def test_model_from_pretrained(self):
  for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
   model = LayoutLMvaModel.from_pretrained(model_name)
   self.assertIsNotNone(model)
def prepare_img():
 image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
 return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
 @cached_property
 def default_image_processor(self):
  return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
 @slow
 def test_inference_no_head(self):
  model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(torch_device)
  image_processor = self.default_image_processor
  image = prepare_img()
  pixel_values = image_processor(images=image, return_tensors='''pt''' ).pixel_values.to(torch_device)
  input_ids = torch.tensor([[1, 2]] )
  bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
  # forward pass
  outputs = model(
   input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
  # verify the logits
  expected_shape = torch.Size((1, 199, 768) )
  self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
  expected_slice = torch.tensor(
   [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device)
  self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ) )
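# A hedged sketch of preparing real inputs for this model: with apply_ocr=False the caller
# must supply words and boxes themselves (names mirror the integration test above):
#
#   image_processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values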
| 29 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
 return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
 args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
 test_command = TestCommand(*args)
 test_command.run()
 datasets_readme_path = os.path.join(dataset_loading_script_dir, '''README.md''' )
 assert os.path.exists(datasets_readme_path)
 dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
 expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
  result, expected = getattr(dataset_infos['''default'''], key), getattr(expected_dataset_infos['''default'''], key)
  if key == "num_bytes":
   assert is_apercent_close(result, expected)
  elif key == "splits":
   assert list(result) == list(expected)
   for split in result:
    assert result[split].name == expected[split].name
    assert result[split].num_examples == expected[split].num_examples
    assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
  else:
   assert result == expected
| 29 | 1 |
"""simple docstring"""
__author__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation):
 operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
 operand_stack = Stack()
 operator_stack = Stack()
 for i in equation:
  if i.isdigit():
   # RULE 1
   operand_stack.push(int(i))
  elif i in operators:
   # RULE 2
   operator_stack.push(i)
  elif i == ")":
   # RULE 4
   opr = operator_stack.peek()
   operator_stack.pop()
   num1 = operand_stack.peek()
   operand_stack.pop()
   num2 = operand_stack.peek()
   operand_stack.pop()
   total = operators[opr](num2, num1)
   operand_stack.push(total)
 # RULE 5
 return operand_stack.peek()
if __name__ == "__main__":
 equation = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
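# Worked trace for "(5 + ((4 * 2) * (2 + 3)))" under the rules above:
# ")" after "4 * 2" pops *, 2, 4 and pushes 8; ")" after "2 + 3" pops +, 3, 2 and pushes 5;
# the next ")" pops *, 5, 8 and pushes 40; the final ")" pops +, 40, 5 and pushes 45.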
| 29 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
 def _info(self):
  return datasets.MetricInfo(
   description=_DESCRIPTION,
   citation=_CITATION,
   inputs_description=_KWARGS_DESCRIPTION,
   features=datasets.Features(
    {
     '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
     '''references''': datasets.Value('''string''' , id='''sequence''' ),
    } ),
   codebase_urls=['''https://github.com/jitsi/jiwer/'''],
   reference_urls=[
    '''https://en.wikipedia.org/wiki/Word_error_rate''',
   ], )
 def _compute(self, predictions=None, references=None, concatenate_texts=False):
  if concatenate_texts:
   return compute_measures(references, predictions)["wer"]
  else:
   incorrect = 0
   total = 0
   for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]
   return incorrect / total
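# A minimal sketch (hedged) of the underlying jiwer call this metric wraps; older jiwer
# releases expose `compute_measures`, newer ones favour `jiwer.process_words` instead.
#
#   m = compute_measures("this is the reference", "this is the prediction")
#   errors = m["substitutions"] + m["deletions"] + m["insertions"]
#   total = m["substitutions"] + m["deletions"] + m["hits"]
#   errors / total  # 0.25 here: one substitution over four reference words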
| 29 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = """path-to-your-trained-model"""
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
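# Optional follow-up (hedged): on memory-constrained GPUs, attention slicing trades a little
# speed for lower peak VRAM; `enable_attention_slicing` is a standard diffusers pipeline method.
#
#   pipe.enable_attention_slicing()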
| 29 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
 return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 29 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
 def _get_uniform_logits(self, batch_size, length):
  scores = jnp.ones((batch_size, length) ) / length
  return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
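 # Numeric intuition for the warper (hedged sketch, values illustrative): dividing logits
 # by T < 1 sharpens the softmax while T > 1 flattens it, which is what the assertions
 # above check.
 #
 #   logits = jnp.array([2.0, 1.0, 0.0])
 #   jax.nn.softmax(logits / 0.5)  # more peaked than jax.nn.softmax(logits)
 #   jax.nn.softmax(logits / 1.3)  # flatter than jax.nn.softmax(logits)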
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
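# Standalone sketch of the composition pattern exercised by the tests above
# (toy shapes; the temperature/top_k values here are illustrative assumptions).
if __name__ == "__main__":
    demo_processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    demo_scores = demo_processors(jnp.zeros((2, 4), dtype="i4"), jnp.ones((2, 10)), cur_len=4)
    print(demo_scores.shape)  # (2, 10)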
| 29 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
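# Standalone sketch (not part of the test class): the greedy longest-match
# wordpiece behaviour exercised above, with a tiny illustrative vocab.
if __name__ == "__main__":
    demo_vocab = {token: i for i, token in enumerate(["[UNK]", "un", "##want", "##ed"])}
    demo_tokenizer = RoCBertWordpieceTokenizer(vocab=demo_vocab, unk_token="[UNK]")
    print(demo_tokenizer.tokenize("unwanted"))  # ['un', '##want', '##ed']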
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
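# Illustrative usage sketch; the feature spec below is an assumption, not part of
# the original benchmark.
if __name__ == "__main__":
    import tempfile

    demo_features = datasets.Features(
        {"text": datasets.Value("string"), "score": datasets.Value("float32")}
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_dataset = generate_example_dataset(f"{tmp_dir}/dummy.arrow", demo_features)
        print(len(demo_dataset), demo_dataset.features)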
| 29 | 1 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
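# Standalone sketch of the masked mean-pooling above, in plain PyTorch with toy
# shapes (the sizes here are illustrative assumptions).
if __name__ == "__main__":
    demo_embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
    demo_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])  # 1 = token, 0 = pad
    demo_pooled = (demo_embs * demo_mask.unsqueeze(2)).sum(dim=1) / demo_mask.sum(dim=1)[:, None]
    print(demo_pooled.shape)  # torch.Size([2, 8])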
| 29 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 29 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
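# Note: the linear layer above shares the embedding matrix's storage (weight
# tying); the conversion function below uses it to rebuild the LM head for
# fine-tuned checkpoints.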
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
    )
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--hf_config""",
        default="""facebook/mbart-large-cc25""",
        type=str,
        help="""Which huggingface architecture to use: mbart-large""",
    )
    parser.add_argument("""--mbart_aa""", action="""store_true""", help="""whether the model is an mBART-50 checkpoint""")
    parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n):
    n = str(n)
    return n == n[::-1]


def solution(limit=1_000_000):
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
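# Worked example: 585 is 0b1001001001, palindromic in base 10 and base 2, so it
# is one of the values accumulated by solution().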
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_graphormer"""] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}"
        ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
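# Standalone sketch of the step() API exercised above, run outside the test class
# (toy tensors stand in for a real denoising model).
if __name__ == "__main__":
    demo_scheduler = DDPMScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for demo_t in demo_scheduler.timesteps:
        demo_noise_pred = torch.randn(1, 3, 8, 8)  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(demo_noise_pred, demo_t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])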
| 29 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}"
        ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
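# Minimal sketch of why the loop above converts nested values: a GenerationConfig
# is not directly JSON-serializable, but its to_dict() form is.
if __name__ == "__main__":
    import json

    demo_generation_config = GenerationConfig(max_length=64, num_beams=4)
    print(json.dumps(demo_generation_config.to_dict())[:80])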
| 29 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 29 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if x is False])/len(na.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
# editor's fix: print() does not lazily interpolate %s arguments the way logger.info does
'''%s not found in cache or force_download set to True, downloading to %s''' % (lowerCAmelCase__ ,temp_file.name) )
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = req.json() # editor's fix: `.json()` belongs to the response object, not the requests module
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
| 29 | 1 |
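# --- editor's sketch -------------------------------------------------------
# A minimal, self-contained illustration of the cache-key scheme used by
# url_to_filename in the block above: the cache filename is sha256(url),
# extended with sha256(etag) when an ETag is known, so a changed ETag yields a
# fresh cache entry. `cache_filename` is our name for this sketch, not the
# original API.
import hashlib
from typing import Optional

def cache_filename(url: str, etag: Optional[str] = None) -> str:
    name = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"  # keep the suffix so HDF5 tooling can recognise the file
    return name

# Two ETags for the same URL map to two distinct cache entries:
print(cache_filename("https://example.com/model.bin", etag='"abc"'))
print(cache_filename("https://example.com/model.bin", etag='"def"'))
# --- end of editor's sketch --------------------------------------------------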
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowerCamelCase_ = range(3 ,int(math.sqrt(lowerCAmelCase__ ) + 1 ) ,2 )
return not any(not number % i for i in odd_numbers )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ,**lowerCAmelCase__ ):
lowerCamelCase_ = factor * value
lowerCamelCase_ = value
while not is_prime(lowerCAmelCase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 ,**lowerCAmelCase__ )
return value
| 29 |
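# --- editor's sketch -------------------------------------------------------
# The primality test in the block above, with the systematic renaming undone so
# the control flow is easier to follow: trial division by odd numbers up to
# sqrt(n). This is our reading of the original intent, not the original source.
import math

def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must be a non-negative int"
    if 1 < number < 4:
        return True   # 2 and 3 are prime
    if number < 2 or number % 2 == 0:
        return False  # 0, 1 and even numbers are not prime
    # only odd candidate divisors up to sqrt(number) need checking
    return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))

print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]
# --- end of editor's sketch --------------------------------------------------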
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` column because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 1 |
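# --- editor's sketch -------------------------------------------------------
# Self-contained version of the table flattening inside
# preprocess_tabfact_function above: TabFact encodes a table as one string with
# '#'-separated columns and newline-separated rows, the first row being the
# header. The sample table below is made up for illustration.
import pandas as pd

def table_text_to_pandas(table_text: str) -> pd.DataFrame:
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])

print(table_text_to_pandas("name#points\nalice#10\nbob#7"))
#     name points
# 0  alice     10
# 1    bob      7
# --- end of editor's sketch --------------------------------------------------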
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return int((input_a, input_a).count(0 ) == 0 )
def lowercase ( ):
assert and_gate(0 ,0 ) == 0
assert and_gate(0 ,1 ) == 0
assert and_gate(1 ,0 ) == 0
assert and_gate(1 ,1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 29 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 29 | 1 |
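# --- editor's sketch -------------------------------------------------------
# NumPy rendition of the score checked in the test above: mean per-token
# softmax cross-entropy against one-hot labels, scaled by -sequence_length to
# match the mT5 log-likelihood convention. Shapes and values are toy stand-ins,
# not the model's real logits.
import numpy as np

def log_softmax(x):
    x = x - x.max(axis=-1, keepdims=True)          # subtract max for stability
    return x - np.log(np.exp(x).sum(axis=-1, keepdims=True))

def mtf_score(logits, labels):
    onehot = np.eye(logits.shape[-1])[labels]      # (batch, seq_len, vocab)
    loss = -(onehot * log_softmax(logits)).sum(-1).mean()
    return -(labels.shape[-1] * loss)

logits = np.random.randn(1, 4, 8)                  # (batch, seq_len, vocab)
labels = np.array([[1, 2, 3, 0]])                  # (batch, seq_len)
print(mtf_score(logits, labels))
# --- end of editor's sketch --------------------------------------------------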
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
| 29 |
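# --- editor's sketch -------------------------------------------------------
# The to_dict pattern from the dataclass above, reduced to a toy: serialise the
# arguments, then recursively convert any field that is itself a config object
# (a stand-in for GenerationConfig here) so the result is JSON-safe. All class
# names below are illustrative, not the transformers API.
from dataclasses import dataclass, field

@dataclass
class GenConfig:
    max_length: int = 20
    num_beams: int = 1

    def to_dict(self):
        return {"max_length": self.max_length, "num_beams": self.num_beams}

@dataclass
class ToySeq2SeqArgs:
    sortish_sampler: bool = False
    generation_config: GenConfig = field(default_factory=GenConfig)

    def to_dict(self):
        d = dict(self.__dict__)
        for k, v in d.items():
            if hasattr(v, "to_dict"):  # recursively serialise nested configs
                d[k] = v.to_dict()
        return d

print(ToySeq2SeqArgs().to_dict())
# --- end of editor's sketch --------------------------------------------------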
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
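# --- editor's sketch -------------------------------------------------------
# The optimisation in generate_pascal_triangle_optimized above rests on row
# symmetry: row k of Pascal's triangle reads the same forwards and backwards,
# so only the first ceil((k+1)/2) sums are computed and then mirrored. A
# compact standalone restatement of the same idea:
def pascal_rows(num_rows):
    result = [[1]]
    for _ in range(1, num_rows):
        padded = [0] + result[-1] + [0]
        distinct = len(padded) // 2                 # ceil(new_row_len / 2)
        half = [padded[i - 1] + padded[i] for i in range(1, distinct + 1)]
        mirror_len = (len(padded) - 1) // 2         # floor(new_row_len / 2)
        result.append(half + half[:mirror_len][::-1])
    return result

print(pascal_rows(5))  # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
# --- end of editor's sketch --------------------------------------------------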
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
# Recurse if needed
if "." in tensor_name:
lowerCamelCase_ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCamelCase_ = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
lowerCamelCase_ = new_module
lowerCamelCase_ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}." )
lowerCamelCase_ = tensor_name in module._buffers
lowerCamelCase_ = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}." )
lowerCamelCase_ = False
lowerCamelCase_ = False
if is_buffer or not is_bitsandbytes_available():
lowerCamelCase_ = False
lowerCamelCase_ = False
else:
lowerCamelCase_ = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
lowerCamelCase_ = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
lowerCamelCase_ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
lowerCamelCase_ = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ ,torch.Tensor ):
lowerCamelCase_ = value.to('''cpu''' )
if value.dtype == torch.inta:
lowerCamelCase_ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,lowerCAmelCase__ ) and fpaa_statistics is None:
lowerCamelCase_ = new_value.T
lowerCamelCase_ = old_value.__dict__
if is_abit:
lowerCamelCase_ = bnb.nn.IntaParams(lowerCAmelCase__ ,requires_grad=lowerCAmelCase__ ,**lowerCAmelCase__ ).to(lowerCAmelCase__ )
elif is_abit:
lowerCamelCase_ = bnb.nn.Paramsabit(lowerCAmelCase__ ,requires_grad=lowerCAmelCase__ ,**lowerCAmelCase__ ).to(lowerCAmelCase__ )
lowerCamelCase_ = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(lowerCAmelCase__ ) )
else:
if value is None:
lowerCamelCase_ = old_value.to(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ ,torch.Tensor ):
lowerCamelCase_ = value.to(lowerCAmelCase__ )
else:
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ ,device=lowerCAmelCase__ )
if is_buffer:
lowerCamelCase_ = new_value
else:
lowerCamelCase_ = nn.Parameter(lowerCAmelCase__ ,requires_grad=old_value.requires_grad )
lowerCamelCase_ = new_value
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=False ):
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase_ = []
current_key_name.append(lowerCAmelCase__ )
if (isinstance(lowerCAmelCase__ ,nn.Linear ) or isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(lowerCAmelCase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = module.weight.shape
else:
lowerCamelCase_ = module.in_features
lowerCamelCase_ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
lowerCamelCase_ = bnb.nn.LinearabitLt(
lowerCAmelCase__ ,lowerCAmelCase__ ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
lowerCamelCase_ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
lowerCamelCase_ = bnb.nn.Linearabit(
lowerCAmelCase__ ,lowerCAmelCase__ ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
lowerCamelCase_ = True
# Store the module class in case we need to transpose the weight later
lowerCamelCase_ = type(lowerCAmelCase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCAmelCase__ )
if len(list(module.children() ) ) > 0:
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,has_been_replaced=lowerCAmelCase__ ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
lowerCamelCase_ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
lowerCamelCase_ , lowerCamelCase_ = _replace_with_bnb_linear(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowercase ( *lowerCAmelCase__ ,**lowerCAmelCase__ ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,lowerCAmelCase__ ,)
return replace_with_bnb_linear(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def lowercase ( *lowerCAmelCase__ ,**lowerCAmelCase__ ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,lowerCAmelCase__ ,)
return set_module_quantized_tensor_to_device(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
lowerCamelCase_ = find_tied_parameters(lowerCAmelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
lowerCamelCase_ = sum(lowerCAmelCase__ ,[] )
lowerCamelCase_ = len(lowerCAmelCase__ ) > 0
# Check if it is a base model
lowerCamelCase_ = not hasattr(lowerCAmelCase__ ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase_ = list(model.named_children() )
lowerCamelCase_ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase_ = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ )
lowerCamelCase_ = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ )
# remove ".weight" from the keys
lowerCamelCase_ = ['''.weight''', '''.bias''']
lowerCamelCase_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase_ = name.replace(lowerCAmelCase__ ,'''''' )
filtered_module_names.append(lowerCAmelCase__ )
return filtered_module_names
| 29 |
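# --- editor's sketch -------------------------------------------------------
# Framework-free skeleton of the recursive walk performed by
# _replace_with_bnb_linear above: visit named children, swap nn.Linear layers
# whose dotted path is not excluded, and recurse into containers.
# QuantLinearStub is a dummy, *not* a bitsandbytes class.
import torch.nn as nn

class QuantLinearStub(nn.Linear):
    """Stand-in for a quantised linear layer (e.g. 8-bit/4-bit variants)."""

def replace_linear(model, modules_to_not_convert=("lm_head",), prefix=()):
    for name, module in model.named_children():
        path = ".".join(prefix + (name,))
        if isinstance(module, nn.Linear) and not any(s in path for s in modules_to_not_convert):
            new = QuantLinearStub(module.in_features, module.out_features,
                                  bias=module.bias is not None)
            new.requires_grad_(False)     # quantised weights stay frozen
            model._modules[name] = new    # in-place swap, as in the code above
        elif len(list(module.children())) > 0:
            replace_linear(module, modules_to_not_convert, prefix + (name,))
    return model

print(replace_linear(nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))))
# --- end of editor's sketch --------------------------------------------------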
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 1 |
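# --- editor's sketch -------------------------------------------------------
# The tests above exercise the standard processor contract: a processor wraps a
# tokenizer and an image processor and routes each modality to the matching
# component, merging the outputs. A toy, dependency-free version of that
# dispatch (everything below is illustrative):
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text))
        if images is not None:
            encoding.update(self.image_processor(images))
        return encoding

proc = ToyProcessor(
    tokenizer=lambda t: {"input_ids": [ord(c) for c in t]},
    image_processor=lambda ims: {"pixel_values": ims},
)
print(proc(text="hi", images=[[0.1, 0.2]]))
# --- end of editor's sketch --------------------------------------------------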
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
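# --- editor's sketch -------------------------------------------------------
# The __init__ above defers all heavy imports to a _LazyModule. A simplified
# illustration of that pattern (not the transformers implementation): record an
# import structure up front and import a submodule only when one of its
# attributes is first accessed.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(9.0), lazy.dumps({"ok": True}))
# --- end of editor's sketch --------------------------------------------------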
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def lowercase ( lowerCAmelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(lowerCAmelCase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( ):
lowerCamelCase_ = 2
while True:
if is_prime(lowerCAmelCase__ ):
yield num
num += 1
def lowercase ( lowerCAmelCase__ = 2_000_000 ):
return sum(takewhile(lambda lowerCAmelCase__ : x < n ,prime_generator() ) )
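# The one-liner above sums every prime strictly below ``n`` (default 2,000,000) by
# consuming the infinite prime generator until the first prime >= n; this matches
# Project Euler problem 10 ("Summation of primes").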
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
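        # RoCBertTokenizer is backed by three aligned files: the token vocabulary
        # plus word-shape and word-pronunciation maps. Every token here maps to its
        # own index, so token ids, shape ids and pronunciation ids coincide in the
        # tests below.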
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
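                # Each expected entry pairs a (start, end) character span in the raw
                # text with the emitted token; special tokens get the empty span
                # (0, 0), and the lower-casing tokenizer also strips the accent, so
                # "naïve" collapses into a single "naive" token covering characters 3-8.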
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=True , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size_divisor
lowerCamelCase_ = do_rescale
def UpperCAmelCase__ ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Optional[Any] = GLPNImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GLPNImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''resample''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_rescale''' ) )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
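        # GLPN's processor resizes so that height and width are exact multiples of
        # size_divisor (32 in this tester), presumably to keep feature maps aligned
        # through the encoder's downsampling stages.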
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column of predicate argument structure information for each predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)
See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions from the key or system files,
        mentions whose corresponding coreference chain is of size one
        are considered singletons. The default evaluation mode includes
        singletons in the evaluation if they are present in the key or the system files.
        By setting 'keep_singletons=False', all singletons in the key and system files
        will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
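# ``evaluate`` below scores each requested metric on these per-document coref infos
# and, once MUC, B-cubed and CEAFe have all been seen, averages their F1 values
# into the single CoNLL score (scaled to 0-100).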
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
| 29 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Dict = 'realm'
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=768 , UpperCAmelCase=128 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=8 , UpperCAmelCase=3072 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=256 , UpperCAmelCase=10 , UpperCAmelCase=1e-3 , UpperCAmelCase=5 , UpperCAmelCase=320 , UpperCAmelCase=1335_3718 , UpperCAmelCase=5000 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , **UpperCAmelCase , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = retriever_proj_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = num_candidates
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = layer_norm_eps
# Reader config
lowerCamelCase_ = span_hidden_size
lowerCamelCase_ = max_span_width
lowerCamelCase_ = reader_layer_norm_eps
lowerCamelCase_ = reader_beam_size
lowerCamelCase_ = reader_seq_len
# Retrieval config
lowerCamelCase_ = num_block_records
lowerCamelCase_ = searcher_beam_size
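        # Minimal usage sketch (illustrative only; assumes the upstream class name
        # ``RealmConfig``):
        #   config = RealmConfig(num_candidates=4, searcher_beam_size=1000)
        # Any field not overridden keeps the default assigned above.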
| 29 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
lowerCamelCase_ = key.replace('''module.encoder''' ,'''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
lowerCamelCase_ = key.replace('''module.decoder''' ,'''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCamelCase_ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
lowerCamelCase_ = key.replace(f"patch_embed{idx}" ,f"patch_embeddings.{int(lowerCAmelCase__ )-1}" )
if "norm" in key:
lowerCamelCase_ = key.replace('''norm''' ,'''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCamelCase_ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
lowerCamelCase_ = key.replace(f"layer_norm{idx}" ,f"layer_norm.{int(lowerCAmelCase__ )-1}" )
if "layer_norm1" in key:
lowerCamelCase_ = key.replace('''layer_norm1''' ,'''layer_norm_1''' )
if "layer_norm2" in key:
lowerCamelCase_ = key.replace('''layer_norm2''' ,'''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
lowerCamelCase_ = key[key.find('''block''' ) + len('''block''' )]
lowerCamelCase_ = key.replace(f"block{idx}" ,f"block.{int(lowerCAmelCase__ )-1}" )
if "attn.q" in key:
lowerCamelCase_ = key.replace('''attn.q''' ,'''attention.self.query''' )
if "attn.proj" in key:
lowerCamelCase_ = key.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in key:
lowerCamelCase_ = key.replace('''attn''' ,'''attention.self''' )
if "fc1" in key:
lowerCamelCase_ = key.replace('''fc1''' ,'''dense1''' )
if "fc2" in key:
lowerCamelCase_ = key.replace('''fc2''' ,'''dense2''' )
if "linear_pred" in key:
lowerCamelCase_ = key.replace('''linear_pred''' ,'''classifier''' )
if "linear_fuse" in key:
lowerCamelCase_ = key.replace('''linear_fuse.conv''' ,'''linear_fuse''' )
lowerCamelCase_ = key.replace('''linear_fuse.bn''' ,'''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCamelCase_ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
lowerCamelCase_ = key.replace(f"linear_c{idx}" ,f"linear_c.{int(lowerCAmelCase__ )-1}" )
if "bot_conv" in key:
lowerCamelCase_ = key.replace('''bot_conv''' ,'''0.convolution''' )
if "skip_conv1" in key:
lowerCamelCase_ = key.replace('''skip_conv1''' ,'''1.convolution''' )
if "skip_conv2" in key:
lowerCamelCase_ = key.replace('''skip_conv2''' ,'''2.convolution''' )
if "fusion1" in key:
lowerCamelCase_ = key.replace('''fusion1''' ,'''1.fusion''' )
if "fusion2" in key:
lowerCamelCase_ = key.replace('''fusion2''' ,'''2.fusion''' )
if "fusion3" in key:
lowerCamelCase_ = key.replace('''fusion3''' ,'''3.fusion''' )
if "fusion" in key and "conv" in key:
lowerCamelCase_ = key.replace('''conv''' ,'''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
lowerCamelCase_ = key.replace('''module.last_layer_depth''' ,'''head.head''' )
lowerCamelCase_ = value
return new_state_dict
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCamelCase_ = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
lowerCamelCase_ = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
lowerCamelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCamelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCamelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCamelCase_ = kv_bias[config.hidden_sizes[i] :]
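# The original checkpoint fuses keys and values into a single ``kv`` matrix per
# attention block; the helper above splits it row-wise, the first ``hidden_size``
# rows becoming the key projection and the remaining rows the value projection
# (weights and biases alike).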
def lowercase ( ):
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
return image
@torch.no_grad()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=None ):
lowerCamelCase_ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] ,decoder_hidden_size=64 ,depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCamelCase_ = GLPNImageProcessor()
# prepare image
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location=torch.device('''cpu''' ) )
# rename keys
lowerCamelCase_ = rename_keys(lowerCAmelCase__ )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase__ ,lowerCAmelCase__ )
# create HuggingFace model and load state dict
lowerCamelCase_ = GLPNForDepthEstimation(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# forward pass
lowerCamelCase_ = model(lowerCAmelCase__ )
lowerCamelCase_ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCamelCase_ = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCamelCase_ = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f"Unknown model name: {model_name}" )
lowerCamelCase_ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ ,lowerCAmelCase__ ) ,organization='''nielsr''' ,commit_message='''Add model''' ,use_temp_dir=lowerCAmelCase__ ,)
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ ,lowerCAmelCase__ ) ,organization='''nielsr''' ,commit_message='''Add image processor''' ,use_temp_dir=lowerCAmelCase__ ,)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 29 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
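        # The struct above has one string child per language, with keys sorted so the
        # Arrow schema is deterministic regardless of the order languages were given.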
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
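        # Illustrative example: {"en": "the cat", "fr": ["le chat", "la chatte"]}
        # flattens to language=("en", "fr", "fr") and
        # translation=("the cat", "le chat", "la chatte"): one language entry per
        # translation string, in ascending language order.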
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 29 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = '''ZinengTang/tvlt-base'''
lowerCamelCase_ = tempfile.mkdtemp()
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
lowerCamelCase_ = np.ones([1_2000] )
lowerCamelCase_ = feature_extractor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(audio=UpperCAmelCase , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
lowerCamelCase_ = np.ones([3, 224, 224] )
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
lowerCamelCase_ = np.ones([1_2000] )
lowerCamelCase_ = np.ones([3, 224, 224] )
lowerCamelCase_ = processor(audio=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_feature_extractor()
lowerCamelCase_ = TvltProcessor(image_processor=UpperCAmelCase , feature_extractor=UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
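# Lazy-import pattern: at import time only the structure above is registered; the
# torch/flax submodules are resolved on first attribute access, while the
# TYPE_CHECKING branch keeps static type checkers aware of the concrete symbols.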
| 29 | 1 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
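# The helper above is a 1% relative-tolerance check; the integration test uses it
# for ``num_bytes``, which can vary slightly between environments, while names and
# example counts are compared exactly.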
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 29 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Get numbers divisible by lps(current)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
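# A brute-force cross-check (Project Euler 234, "semidivisible numbers") of the
# sieve-based solution above, usable only for tiny limits. lps(n) is the largest
# prime <= sqrt(n) and ups(n) the smallest prime > sqrt(n); n counts when exactly
# one of them divides it. Helper names and the prime-square boundary handling
# are my assumptions, not taken from the file above.
import math
def brute_force_solution(limit):
    def is_prime(k):
        return k >= 2 and all(k % i for i in range(2, math.isqrt(k) + 1))
    total = 0
    for n in range(4, limit + 1):
        root = math.isqrt(n)
        if root * root == n and is_prime(root):
            continue  # assumption: squares of primes sit on the boundary and are skipped
        lps = root
        while not is_prime(lps):
            lps -= 1
        ups = root + 1
        while not is_prime(ups):
            ups += 1
        if (n % lps == 0) != (n % ups == 0):
            total += n
    return total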
| 29 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
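# A minimal, illustrative sketch of the lazy-import pattern this init file
# follows: attribute access triggers the submodule import, so heavy backends
# (torch, tensorflow, sentencepiece) load only when actually used. This is a
# simplification written for illustration, not transformers' real _LazyModule.
import importlib
import types
class _LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._locations = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, symbol):
        if symbol not in self._locations:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f".{self._locations[symbol]}", self.__name__)
        return getattr(module, symbol)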
| 29 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source ,target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 29 | 1 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=[] ):
lowerCamelCase_ = size[0] - overlap_pixels * 2
lowerCamelCase_ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowerCamelCase_ = np.ones((size_y, size_x) ,dtype=np.uinta ) * 255
lowerCamelCase_ = np.pad(lowerCAmelCase__ ,mode='''linear_ramp''' ,pad_width=lowerCAmelCase__ ,end_values=0 )
if "l" in remove_borders:
lowerCamelCase_ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowerCamelCase_ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowerCamelCase_ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowerCamelCase_ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
return max(lowerCAmelCase__ ,min(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
return (
clamp(rect[0] ,min[0] ,max[0] ),
clamp(rect[1] ,min[1] ,max[1] ),
clamp(rect[2] ,min[0] ,max[0] ),
clamp(rect[3] ,min[1] ,max[1] ),
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = list(lowerCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowerCamelCase_ = clamp_rect(lowerCAmelCase__ ,[0, 0] ,[image_size[0], image_size[1]] )
return rect
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = Image.new('''RGB''' ,(tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) ,Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) ,(0, 0) ,)
result.paste(lowerCAmelCase__ ,(original_slice, 0) )
return result
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowerCamelCase_ = tile.crop(lowerCAmelCase__ )
return tile
def lowercase ( n ,d ):
    # round n down to the nearest multiple of d
    lowerCamelCase_ = n % d
    return n - lowerCamelCase_
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 350 , ):
super().__init__(
vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , low_res_scheduler=UpperCAmelCase , scheduler=UpperCAmelCase , max_noise_level=UpperCAmelCase , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
torch.manual_seed(0 )
lowerCamelCase_ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCamelCase_ = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
lowerCamelCase_ = image.crop(UpperCAmelCase )
lowerCamelCase_ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCamelCase_ = translated_slice_x - (original_image_slice / 2)
lowerCamelCase_ = max(0 , UpperCAmelCase )
lowerCamelCase_ = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = to_input.size
lowerCamelCase_ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCamelCase_ = super(UpperCAmelCase , self ).__call__(image=UpperCAmelCase , **UpperCAmelCase ).images[0]
lowerCamelCase_ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCamelCase_ = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCamelCase_ = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
lowerCamelCase_ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='''L''' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 75 , UpperCAmelCase = 9.0 , UpperCAmelCase = 50 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 128 , UpperCAmelCase = 32 , UpperCAmelCase = 32 , ):
lowerCamelCase_ = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
lowerCamelCase_ = math.ceil(image.size[0] / tile_size )
lowerCamelCase_ = math.ceil(image.size[1] / tile_size )
lowerCamelCase_ = tcx * tcy
lowerCamelCase_ = 0
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , prompt=UpperCAmelCase , num_inference_steps=UpperCAmelCase , guidance_scale=UpperCAmelCase , noise_level=UpperCAmelCase , negative_prompt=UpperCAmelCase , num_images_per_prompt=UpperCAmelCase , eta=UpperCAmelCase , generator=UpperCAmelCase , latents=UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def lowercase ( ):
# Run a demo
lowerCamelCase_ = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase_ = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCAmelCase__ ,revision='''fp16''' ,torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to('''cuda''' )
lowerCamelCase_ = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(lowerCAmelCase__ ):
print(f"progress: {obj['progress']:.4f}" )
obj["image"].save('''diffusers_library_progress.jpg''' )
lowerCamelCase_ = pipe(image=lowerCAmelCase__ ,prompt='''Black font, white background, vector''' ,noise_level=40 ,callback=lowerCAmelCase__ )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
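# A tiny numpy demo of the feathering trick used when building the transparency
# mask above: pad a block of 255s with a `linear_ramp` so adjacent tiles blend
# smoothly instead of leaving visible seams (sizes here are illustrative).
import numpy as np
core = np.ones((4, 4), dtype=np.uint8) * 255
feathered = np.pad(core, pad_width=3, mode="linear_ramp", end_values=0)
print(feathered.shape)  # (10, 10): 0 at the border ramping up to 255 in the core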
| 29 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
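# Worked example of the formula in the docstring above (illustrative sentences):
# reference "the cat sat on the mat" vs. hypothesis "the cat sit on mat" gives
# S=1 (sat->sit), D=1 (the), I=0, C=4, so WER = (1 + 1 + 0) / (1 + 1 + 4) = 1/3.
from jiwer import compute_measures
measures = compute_measures("the cat sat on the mat", "the cat sit on mat")
print(measures["wer"])  # ~0.333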
| 29 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase ( enum.Enum ):
a__: Any = 0
a__: Tuple = 1
a__: Optional[Any] = 2
@add_end_docstrings(lowerCAmelCase )
class __lowerCamelCase ( lowerCAmelCase ):
a__: Tuple = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCamelCase_ = None
if self.model.config.prefix is not None:
lowerCamelCase_ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCamelCase_ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._sanitize_parameters(prefix=UpperCAmelCase , **self._forward_params )
lowerCamelCase_ = {**self._preprocess_params, **preprocess_params}
lowerCamelCase_ = {**self._forward_params, **forward_params}
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ):
lowerCamelCase_ = {}
if prefix is not None:
lowerCamelCase_ = prefix
if prefix:
lowerCamelCase_ = self.tokenizer(
UpperCAmelCase , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
lowerCamelCase_ = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
lowerCamelCase_ = handle_long_generation
preprocess_params.update(UpperCAmelCase )
lowerCamelCase_ = generate_kwargs
lowerCamelCase_ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
lowerCamelCase_ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
lowerCamelCase_ = ReturnType.TENSORS
if return_type is not None:
lowerCamelCase_ = return_type
if clean_up_tokenization_spaces is not None:
lowerCamelCase_ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCamelCase_ = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
if len(UpperCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
lowerCamelCase_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase__ ( self , *UpperCAmelCase , **UpperCAmelCase ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*UpperCAmelCase , **UpperCAmelCase )
def __call__( self , UpperCAmelCase , **UpperCAmelCase ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase="" , UpperCAmelCase=None , **UpperCAmelCase ):
lowerCamelCase_ = self.tokenizer(
prefix + prompt_text , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
lowerCamelCase_ = prompt_text
if handle_long_generation == "hole":
lowerCamelCase_ = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCamelCase_ = generate_kwargs['''max_new_tokens''']
else:
lowerCamelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCamelCase_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
lowerCamelCase_ = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
lowerCamelCase_ = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = model_inputs['''input_ids''']
lowerCamelCase_ = model_inputs.get('''attention_mask''' , UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = 1
else:
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCamelCase_ = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
lowerCamelCase_ = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCamelCase_ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCamelCase_ = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCamelCase_ = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = generated_sequence.shape[0]
if self.framework == "pt":
lowerCamelCase_ = generated_sequence.reshape(UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
lowerCamelCase_ = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=ReturnType.FULL_TEXT , UpperCAmelCase=True ):
lowerCamelCase_ = model_outputs['''generated_sequence'''][0]
lowerCamelCase_ = model_outputs['''input_ids''']
lowerCamelCase_ = model_outputs['''prompt_text''']
lowerCamelCase_ = generated_sequence.numpy().tolist()
lowerCamelCase_ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCamelCase_ = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCamelCase_ = self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
lowerCamelCase_ = prompt_text + text[prompt_length:]
else:
lowerCamelCase_ = text[prompt_length:]
lowerCamelCase_ = {'''generated_text''': all_text}
records.append(UpperCAmelCase )
return records
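# A plain-Python sketch of the "hole" long-prompt strategy handled above: if the
# prompt plus the requested new tokens would overflow the model window, keep only
# the most recent tokens (the real code slices tensors; names here are assumed).
def hole_truncate(token_ids, model_max_length, max_new_tokens):
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("requested new tokens exceed the model's max length")
    return token_ids[-keep_length:]
print(hole_truncate(list(range(10)), model_max_length=8, max_new_tokens=3))  # [5, 6, 7, 8, 9]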
| 29 |
"""simple docstring"""
def lowercase ( input_str ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
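# Quick check of the one-liner above: split() also collapses repeated whitespace,
# so spacing is normalized when the reversed words are rejoined.
assert lowercase("hello   world again") == "again world hello"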
| 29 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = 'blenderbot-small'
a__: Tuple = ['past_key_values']
a__: Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , UpperCAmelCase=5_0265 , UpperCAmelCase=512 , UpperCAmelCase=8 , UpperCAmelCase=2048 , UpperCAmelCase=16 , UpperCAmelCase=8 , UpperCAmelCase=2048 , UpperCAmelCase=16 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="gelu" , UpperCAmelCase=512 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1 , UpperCAmelCase=False , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=2 , **UpperCAmelCase , ):
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
class __lowerCamelCase ( lowerCAmelCase ):
@property
def UpperCAmelCase__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase_ = {0: '''batch'''}
lowerCamelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCamelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCamelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase_ , lowerCamelCase_ = self.num_layers
for i in range(UpperCAmelCase ):
lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCamelCase_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def UpperCAmelCase__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase_ = super().outputs
else:
lowerCamelCase_ = super(UpperCAmelCase , self ).outputs
if self.use_past:
lowerCamelCase_ , lowerCamelCase_ = self.num_layers
for i in range(UpperCAmelCase ):
lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Generate decoder inputs
lowerCamelCase_ = seq_length if not self.use_past else 1
lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase_ = dict(**UpperCAmelCase , **UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase_ , lowerCamelCase_ = common_inputs['''input_ids'''].shape
lowerCamelCase_ = common_inputs['''decoder_input_ids'''].shape[1]
lowerCamelCase_ , lowerCamelCase_ = self.num_attention_heads
lowerCamelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase_ = decoder_seq_length + 3
lowerCamelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase_ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 )
lowerCamelCase_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase_ , lowerCamelCase_ = self.num_layers
lowerCamelCase_ = min(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers
lowerCamelCase_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
) )
# TODO: test this.
lowerCamelCase_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(UpperCAmelCase , UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) )
return common_inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase_ , lowerCamelCase_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCamelCase_ = seqlen + 2
lowerCamelCase_ , lowerCamelCase_ = self.num_layers
lowerCamelCase_ , lowerCamelCase_ = self.num_attention_heads
lowerCamelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase_ = common_inputs['''attention_mask'''].dtype
lowerCamelCase_ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
lowerCamelCase_ = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase )
]
return common_inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase_ = compute_effective_axis_dimension(
UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase_ = tokenizer.num_special_tokens_to_add(UpperCAmelCase )
lowerCamelCase_ = compute_effective_axis_dimension(
UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase_ = dict(tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase ) )
return common_inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
elif self.task == "causal-lm":
lowerCamelCase_ = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
else:
lowerCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
return common_inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase_ = super()._flatten_past_key_values_(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
lowerCamelCase_ = super(UpperCAmelCase , self )._flatten_past_key_values_(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
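# Shape sketch for the dummy past_key_values fabricated above: each decoder layer
# caches a (key, value) pair of shape (batch, num_heads, past_seq_len, head_dim).
# The values below mirror this config's defaults (d_model=512, 16 heads, 8 layers);
# the batch and past length are arbitrary illustrative choices.
import torch
batch, num_heads, past_len, d_model, n_layers = 2, 16, 5, 512, 8
head_dim = d_model // num_heads  # 32
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(n_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 5, 32])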
| 29 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
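# A numpy-only sketch of the top-k warping exercised by the tests above: every
# logit outside the k largest per row is pushed to the filter value (-inf)
# before sampling. Function and variable names are illustrative.
import numpy as np
def top_k_filter(logits, k, filter_value=-np.inf):
    thresholds = np.sort(logits, axis=-1)[:, -k][:, None]  # k-th largest per row
    return np.where(logits < thresholds, filter_value, logits)
print(top_k_filter(np.array([[0.1, 0.5, 0.2, 0.9, 0.3]]), k=2))
# [[-inf  0.5 -inf  0.9 -inf]]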
| 29 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
A_ = TypeVar("""_T""")
class __lowerCamelCase ( Generic[_T] ):
    def __init__( self , UpperCAmelCase = None ):
        # one stack receives new items, the other serves them in FIFO order
        self._stacka = list(UpperCAmelCase or [] )
        self._stackb = []
    def __len__( self ):
        return len(self._stacka ) + len(self._stackb )
    def __repr__( self ):
        return f"Queue({tuple(self._stackb[::-1] + self._stacka )})"
    def put( self , UpperCAmelCase ):
        self._stacka.append(UpperCAmelCase )
    def get( self ):
        if not self._stackb:
            # refill the out-stack: reversing the in-stack restores FIFO order
            while self._stacka:
                self._stackb.append(self._stacka.pop() )
        if not self._stackb:
            raise IndexError('''Queue is empty''' )
        return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
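# Usage sketch for the queue above: FIFO order is preserved even though both
# underlying containers are LIFO stacks (amortized O(1) per operation).
q = __lowerCamelCase([1, 2])
q.put(3)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]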
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
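# A standalone sketch of the timing-decorator pattern used at the top of this
# file: the wrapped function's return value is discarded and the elapsed
# wall-clock time is returned instead. Names here are illustrative.
import timeit
def timed(func):
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    wrapper.__name__ = func.__name__
    return wrapper
@timed
def busy_sum(n):
    return sum(range(n))
print(f"busy_sum ran in {busy_sum(1_000_000):.4f}s")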
| 29 | 1 |
def lucas_lehmer_test ( p ):
    """simple docstring"""
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
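# Sanity checks for the Lucas-Lehmer test above: for an odd prime exponent p,
# 2**p - 1 is prime iff the residue reaches 0. 2**7 - 1 = 127 is prime, while
# 2**11 - 1 = 2047 = 23 * 89 is not.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False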
| 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
from random import randint, random
def construct_highway(number_of_cells, frequency, initial_speed, random_frequency=False, random_speed=False, max_speed=5) -> list:
    """simple docstring"""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now, car_index) -> int:
    """simple docstring"""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now, probability, max_speed) -> list:
    """simple docstring"""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(highway, number_of_update, probability, max_speed) -> list:
    """simple docstring"""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
    import doctest
    doctest.testmod()
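# Usage sketch for the Nagel-Schreckenberg simulation above (illustrative
# parameters): a 12-cell ring road with one car every 4 cells, simulated for
# 3 steps with a 10% random-slowdown probability.
road = construct_highway(number_of_cells=12, frequency=4, initial_speed=1)
history = simulate(road, number_of_update=3, probability=0.1, max_speed=5)
for state in history:
    print(state)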
| 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
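The conversion above rebuilds the LM head from the shared embedding matrix (weight tying). A small sketch of the same idea with toy sizes; all numbers are illustrative:

import torch
from torch import nn

emb = nn.Embedding(1_000, 64)          # vocab_size x d_model
vocab_size, d_model = emb.weight.shape
lm_head = nn.Linear(d_model, vocab_size, bias=False)
lm_head.weight = emb.weight            # share the Parameter: logits reuse the embeddings
logits = lm_head(torch.randn(2, 7, d_model))  # -> (2, 7, vocab_size)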
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Any , __lowerCAmelCase : List[Any] , ) -> Tuple:
_A = parent
_A = 13
_A = 7
_A = True
_A = True
_A = False
_A = True
_A = 99
_A = 32
_A = 2
_A = 4
_A = 37
_A = '''gelu'''
_A = 0.1
_A = 0.1
_A = 5_12
_A = 16
_A = 2
_A = 0.02
_A = 3
_A = 4
_A = None
def snake_case_ ( self : str ) -> Optional[int]:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Any:
_A = TFDistilBertModel(config=__lowerCAmelCase )
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_A = model(__lowerCAmelCase )
_A = [input_ids, input_mask]
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ) -> Union[str, Any]:
_A = TFDistilBertForMaskedLM(config=__lowerCAmelCase )
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> Optional[Any]:
_A = TFDistilBertForQuestionAnswering(config=__lowerCAmelCase )
_A = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> Tuple:
_A = self.num_labels
_A = TFDistilBertForSequenceClassification(__lowerCAmelCase )
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ) -> str:
_A = self.num_choices
_A = TFDistilBertForMultipleChoice(__lowerCAmelCase )
_A = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_A = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ) -> Dict:
_A = self.num_labels
_A = TFDistilBertForTokenClassification(__lowerCAmelCase )
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
_A = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self : Optional[int] ) -> Any:
_A = self.prepare_config_and_inputs()
((_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
"""simple docstring"""
a__ : Optional[int] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a__ : Dict = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ : str = False
a__ : Optional[Any] = False
def snake_case_ ( self : int ) -> Optional[int]:
_A = TFDistilBertModelTester(self )
_A = ConfigTester(self , config_class=__lowerCAmelCase , dim=37 )
def snake_case_ ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case_ ( self : Optional[Any] ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__lowerCAmelCase )
@slow
def snake_case_ ( self : List[str] ) -> List[str]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_A = TFDistilBertModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def snake_case_ ( self : Dict ) -> Union[str, Any]:
_A = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(__lowerCAmelCase )[0]
_A = [1, 6, 7_68]
self.assertEqual(output.shape , __lowerCAmelCase )
_A = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
| 2 |
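The integration test above pins a 3x3 slice of the hidden states against hard-coded values with an absolute tolerance. A toy illustration of that tolerance check, independent of any model (values are made up):

import tensorflow as tf

expected = tf.constant([[0.19, -0.13], [0.22, -0.07]])
actual = expected + 5e-5                               # within the 1e-4 budget
tf.debugging.assert_near(actual, expected, atol=1e-4)  # passes silently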
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 3 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
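The test above walks scheduler.step over reversed timesteps ("predict noise residual, then predict the previous mean of sample x_t-1"). For reference, a compact sketch of the underlying DDPM reverse update for the epsilon-prediction parameterization, using the same linear beta schedule as the config above (after Ho et al., 2020); this is an illustration, not the diffusers implementation:

import torch

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)

def ddpm_step(x_t, eps_pred, t, generator=None):
    # Posterior mean of x_{t-1} given x_t and the predicted noise.
    mean = (x_t - betas[t] / torch.sqrt(1 - alphas_cumprod[t]) * eps_pred) / torch.sqrt(alphas[t])
    if t == 0:
        return mean
    # "fixed_small" variance: beta_t * (1 - abar_{t-1}) / (1 - abar_t)
    var = betas[t] * (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t])
    return mean + torch.sqrt(var) * torch.randn(x_t.shape, generator=generator)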
"""simple docstring"""
import os
import sys
import unittest
__UpperCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__UpperCamelCase : Dict = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
__UpperCamelCase : int = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_test_to_tester_mapping(_snake_case )
lowerCAmelCase = get_test_to_tester_mapping(_snake_case )
lowerCAmelCase = {'BertModelTest': 'BertModelTester'}
lowerCAmelCase = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_model_to_test_mapping(_snake_case )
lowerCAmelCase = get_model_to_test_mapping(_snake_case )
lowerCAmelCase = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCAmelCase = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = get_model_to_tester_mapping(_snake_case )
lowerCAmelCase = get_model_to_tester_mapping(_snake_case )
lowerCAmelCase = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCAmelCase = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
self.assertEqual(get_test_info.to_json(_snake_case ) , _snake_case )
| 4 |
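The expected data above pairs each model class with its test (and tester) classes, i.e. the mappings are inversions of one another. A generic sketch of that inversion (names are illustrative):

from collections import defaultdict

def invert(mapping):
    # {"BertModel": ["BertModelTest"]} -> {"BertModelTest": ["BertModel"]}
    out = defaultdict(list)
    for key, values in mapping.items():
        for v in values:
            out[v].append(key)
    return dict(out)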
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
| 29 | 0 |
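The to_dict override above serializes nested values such as a GenerationConfig into plain dicts. A hypothetical usage sketch, with the output path as a placeholder; the field names come from the dataclass above:

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",              # placeholder
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
d = args.to_dict()                 # nested configs become plain dicts, JSON-safe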
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
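Both lazy-import __init__ cells above rely on transformers' _LazyModule so heavy submodules only import on first attribute access. A minimal stand-alone version of that pattern via PEP 562's module-level __getattr__, assuming it lives in a package's __init__.py; the structure and names are illustrative:

import importlib

_import_structure = {"modeling": ["MyModel"], "tokenization": ["MyTokenizer"]}

def __getattr__(name):
    # Resolve the symbol lazily from the submodule that declares it.
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")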
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
| 29 | 0 |
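url_to_filename above derives cache keys from a SHA-256 of the URL, plus a second hash of the server's ETag when one is available, so a changed remote file gets a fresh cache entry. A condensed sketch of that scheme (the sample URL and ETag are placeholders):

from hashlib import sha256

def cache_key(url, etag=None):
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    return name

print(cache_key("https://example.com/model.bin", etag='"abc123"'))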
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_lowerCamelCase = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_lowerCamelCase = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
_lowerCamelCase = [file for file in filepaths if ' ' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
_lowerCamelCase = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
_lowerCamelCase = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
_lowerCamelCase = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
    sys.exit(bad_files)
| 6 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
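        # `table_text` arrives as a flat string: rows separated by "\n", cells by "#",
        # with the first row holding the column headers, e.g. "city#pop\nparis#2.1".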
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
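# A typical invocation looks like the following; the checkpoint and paths are
# illustrative, but the flags correspond to the argument dataclasses parsed by
# this script (model_name_or_path, max_seq_length, output_dir, ...):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --max_seq_length 1024 \
#       --output_dir ./tapex-tabfact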
| 29 | 0 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
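# Both helpers evaluate truncated Maclaurin expansions after reducing theta
# modulo 2*pi so that the series converges quickly:
#   sin(theta) = sum_{r>=0} (-1)**r * theta**(2*r + 1) / (2*r + 1)!
#   cos(theta) = sum_{r>=0} (-1)**r * theta**(2*r) / (2*r)!
# Quick sanity check against the standard library (illustrative only):
#   >>> import math
#   >>> math.isclose(maclaurin_sin(1.0), math.sin(1.0), rel_tol=1e-9)
#   True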
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 7 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
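        # Note: optax returns the cross-entropy averaged over the target tokens
        # (the batch size is 1 here), so multiplying the mean loss by
        # labels.shape[-1] recovers the sequence-summed negative log-likelihood
        # against which EXPECTED_SCORE was recorded.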
| 29 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]
    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
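    # For reference, torch.utils.cpp_extension also offers load_inline(), a
    # simpler variant of the same JIT compilation mechanism. The toy extension
    # below is purely illustrative and unrelated to the deformable-attention
    # kernels compiled above:
    #
    #   from torch.utils.cpp_extension import load_inline
    #   cpp_src = "torch::Tensor double_it(torch::Tensor x) { return x * 2; }"
    #   ext = load_inline(name="toy_ext", cpp_sources=[cpp_src], functions=["double_it"])
    #   ext.double_it(torch.ones(3))  # tensor([2., 2., 2.])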
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function():
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f'length of state dict: {len(state_dict.keys())}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys())}')
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
# unet(128)
value_function()
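# Both converters above rely on the source and target checkpoints enumerating
# their parameters in the same order, so a plain zip() of the two key lists
# yields the rename map. Toy illustration with made-up keys:
#
#   src = {"blocks.0.w": 1, "blocks.0.b": 2}
#   dst_keys = ["layers.0.weight", "layers.0.bias"]
#   mapping = dict(zip(src.keys(), dst_keys))
#   renamed = {mapping[k]: src[k] for k in list(src)}
#   # renamed == {"layers.0.weight": 1, "layers.0.bias": 2}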
| 9 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
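    # Minimal usage sketch distilled from the tests above (checkpoint name and
    # image path are illustrative):
    #
    #   from PIL import Image
    #   from transformers import AlignProcessor
    #   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
    #   inputs = processor(text="a photo of a cat", images=Image.open("cat.png"),
    #                      return_tensors="pt")
    #   # inputs contains input_ids, token_type_ids, attention_mask, pixel_values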
| 29 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
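    # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above
    # evaluates to 5 * 2**6 = 320: the feature encoder emits one frame per 320
    # raw audio samples, i.e. every 20 ms at a 16 kHz sampling rate.
    #
    #   >>> import functools, operator
    #   >>> functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)
    #   320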
| 10 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
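# Worked example of the renaming above (the key is representative of the
# original checkpoint layout):
#
#   rename_key("layers.0.blocks.1.modulation.f.weight")
#   # -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"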
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
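    # Example invocation (the script filename and output path are illustrative):
    #   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub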
| 29 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
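    # With the defaults above, the two fallbacks resolve to
    # attention_hidden_size == hidden_size (4096) and
    # intermediate_size == 4 * hidden_size (16384). Illustrative check, assuming
    # the class is exported under the upstream name RwkvConfig:
    #
    #   cfg = RwkvConfig()
    #   (cfg.attention_hidden_size, cfg.intermediate_size)  # (4096, 16384)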
| 11 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _snake_case ( unittest.TestCase ):
    def setUp(self):
'''simple docstring'''
lowercase__ : Dict = """ylacombe/bark-small"""
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : Any = """en_speaker_1"""
lowercase__ : Optional[int] = """This is a test string"""
lowercase__ : Tuple = """speaker_embeddings_path.json"""
lowercase__ : str = """speaker_embeddings"""
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
'''simple docstring'''
        processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
# test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
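        # Structure of a voice preset as exercised above: a dict with a 1-D
        # "semantic_prompt", a (2, seq_len) "coarse_prompt" and an (8, seq_len)
        # "fine_prompt". It can be passed directly, saved to an .npz file, or
        # referenced by a hub preset name such as "en_speaker_1":
        #
        #   preset = {
        #       "semantic_prompt": np.ones(35),
        #       "coarse_prompt": np.ones((2, 35)),
        #       "fine_prompt": np.ones((8, 35)),
        #   }
        #   np.savez("voice.npz", **preset)  # then processor(..., voice_preset="voice.npz")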
    def test_tokenizer(self):
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 12 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
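

# Hedged, self-contained sketch of the CoNLL score computed above: it is simply
# the arithmetic mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100.
# The F1 values below are made up for illustration.
def _conll_score_sketch(muc_fa, bcub_fa, ceafe_fa):
    return (muc_fa + bcub_fa + ceafe_fa) / 3 * 100


assert abs(_conll_score_sketch(0.8, 0.7, 0.6) - 70.0) < 1e-6
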
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
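

# Hedged, self-contained sketch of the gold-parse check above: a key file counts
# as having gold parse annotation when a non-comment line carries a parse bit
# (column 6) other than "-". The sample line is taken from the docstring example;
# this simplified scan does not reproduce the early exit on short lines.
_sample_key_lines = [
    "#begin document (bc/cctv/00/cctv_0005); part 000",
    "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -",
]
assert any(
    len(_line.split()) > 6 and _line.split()[5] != "-"
    for _line in _sample_key_lines
    if not _line.startswith("#")
)
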
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
| 29 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCAmelCase__ ( ) -> List[str]:
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=UpperCAmelCase_ , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=UpperCAmelCase_ , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=UpperCAmelCase_ , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=UpperCAmelCase_ , default=0 , help='cuda_id.' , )
__lowerCamelCase : Union[str, Any] = parser.parse_args()
return args
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ) -> Optional[Any]:
if not len(UpperCAmelCase_ ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
__lowerCamelCase , __lowerCamelCase : Tuple = imgs[0].size
__lowerCamelCase : Any = Image.new('RGB' , size=(cols * w, rows * h) )
__lowerCamelCase , __lowerCamelCase : Dict = grid.size
for i, img in enumerate(UpperCAmelCase_ ):
grid.paste(UpperCAmelCase_ , box=(i % cols * w, i // cols * h) )
return grid
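

# Hedged, self-contained sketch of the grid-pasting logic above: four 64x64
# tiles arranged as a 2x2 grid yield one 128x128 image. Colors are illustrative.
_demo_imgs = [Image.new('RGB', (64, 64), color=_c) for _c in ('red', 'green', 'blue', 'white')]
_w, _h = _demo_imgs[0].size
_demo_grid = Image.new('RGB', size=(2 * _w, 2 * _h))
for _i, _img in enumerate(_demo_imgs):
    _demo_grid.paste(_img, box=(_i % 2 * _w, _i // 2 * _h))
assert _demo_grid.size == (128, 128)
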
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]="robotic cat with wings" , UpperCAmelCase_ : Dict=7.5 , UpperCAmelCase_ : Optional[Any]=50 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Tuple=42 , ) -> str:
__lowerCamelCase : Dict = torch.Generator(pipeline.device ).manual_seed(UpperCAmelCase_ )
__lowerCamelCase : List[str] = pipeline(
UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , ).images
__lowerCamelCase : Dict = int(math.sqrt(UpperCAmelCase_ ) )
__lowerCamelCase : List[Any] = image_grid(UpperCAmelCase_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
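

# Hedged aside on the manual_seed call above: a fixed torch.Generator seed makes
# sampling reproducible across runs. Minimal self-contained check with plain
# tensors (not the diffusion pipeline itself):
_ga = torch.Generator().manual_seed(42)
_gb = torch.Generator().manual_seed(42)
assert torch.equal(torch.randn(4, generator=_ga), torch.randn(4, generator=_gb))
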
A__ : Any = parse_args()
# Load models and create wrapper for stable diffusion
A__ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
A__ : Optional[int] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
A__ : List[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
A__ : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
A__ : Any = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
A__ : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
A__ : Optional[Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, """unet""", unet)
else:
A__ : List[Any] = unet.to(torch.device("""cuda""", args.cuda_id))
A__ : str = pipeline.to(unet.device)
A__ , A__ : int = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
A__ : Union[str, Any] = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 13 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
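

# Hedged, self-contained sketch of the save/load round-trip the tests above
# exercise; requires transformers, and the config values are illustrative.
def _roundtrip_sketch():
    _cfg = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as _tmp:
        _cfg.save_pretrained(_tmp)
        assert GenerationConfig.from_pretrained(_tmp).temperature == 0.7


if __name__ == "__main__":
    _roundtrip_sketch()
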
| 29 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3_2 , _a=3 , _a=4 , _a=[1_0, 2_0, 3_0, 4_0] , _a=[2, 2, 3, 2] , _a=True , _a=True , _a=3_7 , _a="gelu" , _a=1_0 , _a=0.02 , _a=["stage2", "stage3", "stage4"] , _a=3 , _a=None , ) -> Any:
_a : Dict = parent
_a : List[str] = batch_size
_a : Any = image_size
_a : Optional[int] = num_channels
_a : List[Any] = num_stages
_a : List[str] = hidden_sizes
_a : Tuple = depths
_a : Any = is_training
_a : Optional[int] = use_labels
_a : Optional[Any] = intermediate_size
_a : Optional[int] = hidden_act
_a : Dict = type_sequence_label_size
_a : str = initializer_range
_a : Optional[int] = out_features
_a : List[str] = num_labels
_a : Optional[Any] = scope
_a : Optional[int] = num_stages
def __lowercase ( self ) -> List[Any]:
_a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Tuple = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowercase ( self ) -> Union[str, Any]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_a , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def __lowercase ( self , _a , _a , _a ) -> Any:
_a : Dict = UperNetForSemanticSegmentation(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowercase ( self ) -> Optional[Any]:
_a : Tuple = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) ,
) : Dict = config_and_inputs
_a : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
def __lowercase ( self ) -> str:
_a : Dict = UperNetModelTester(self )
_a : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self ) -> Optional[int]:
return
def __lowercase ( self ) -> Tuple:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(_a )
_a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Any = [*signature.parameters.keys()]
_a : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Tuple:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowercase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowercase ( self ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self ) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> List[str]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Any = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : int = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> int:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = _config_zero_init(_a )
_a : Tuple = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __lowercase ( self ) -> List[Any]:
pass
@slow
def __lowercase ( self ) -> Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Tuple = UperNetForSemanticSegmentation.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_a : str = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' ,repo_type='''dataset''' ,filename='''ADE_val_00000001.jpg''' )
_a : Optional[int] = Image.open(__a ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : List[str] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
_a : List[str] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_a )
_a : List[str] = prepare_img()
_a : Union[str, Any] = processor(images=_a , return_tensors='''pt''' ).to(_a )
with torch.no_grad():
_a : Union[str, Any] = model(**_a )
_a : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _a )
_a : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1e-4 ) )
def __lowercase ( self ) -> str:
_a : Tuple = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
_a : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_a )
_a : Dict = prepare_img()
_a : Union[str, Any] = processor(images=_a , return_tensors='''pt''' ).to(_a )
with torch.no_grad():
_a : Dict = model(**_a )
_a : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _a )
_a : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _a , atol=1e-4 ) )
| 14 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
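

# Hedged, self-contained sketch of the flattening logic above: a translation
# dict with possibly multiple strings per language becomes sorted, aligned
# language/translation tuples. The sample languages and strings are made up.
_translation_dict = {"en": "the cat", "fr": ["le chat", "la chatte"]}
_pairs = []
for _lang, _text in _translation_dict.items():
    if isinstance(_text, str):
        _pairs.append((_lang, _text))
    else:
        _pairs.extend((_lang, _el) for _el in _text)
_langs, _translations = zip(*sorted(_pairs))
assert _langs == ("en", "fr", "fr")
assert _translations == ("the cat", "la chatte", "le chat")
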
| 29 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Union[str, Any]=False ) -> Dict:
"""simple docstring"""
try:
lowercase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ = strtobool(__magic_name__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
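

# Hedged, self-contained sketch of the flag parsing above: strtobool maps
# "yes"/"true"/"1" to 1 and "no"/"false"/"0" to 0, and an unset key falls back
# to the default. The environment variable name below is made up.
os.environ.setdefault("ACCELERATE_FAKE_FLAG", "yes")
assert bool(strtobool(os.environ["ACCELERATE_FAKE_FLAG"])) is True
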
A : int = parse_flag_from_env('RUN_SLOW', default=False)
def UpperCamelCase ( __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return unittest.skip("""Test was skipped""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : str ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[Any]=None , __magic_name__ : Optional[Any]=None ) -> Optional[int]:
"""simple docstring"""
if test_case is None:
return partial(__magic_name__ , version=__magic_name__ )
return unittest.skipUnless(is_torch_version(""">=""" , __magic_name__ ) , f'''test requires torch version >= {version}''' )(__magic_name__ )
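

# Hedged, self-contained mirror of the `partial` trick above: the same decorator
# works both bare (@_require_torch_version_sketch) and parameterized
# (@_require_torch_version_sketch(version="2.0")). The name is illustrative.
def _require_torch_version_sketch(test_case=None, version="1.12.0"):
    if test_case is None:
        # Called with arguments: return a decorator with `version` bound.
        return partial(_require_torch_version_sketch, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
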
def UpperCamelCase ( __magic_name__ : int ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : int ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(__magic_name__ )
def UpperCamelCase ( __magic_name__ : Dict ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(__magic_name__ )
A : Any = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( __magic_name__ : Dict ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(__magic_name__ )
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = True
@classmethod
def lowerCamelCase__ (cls : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
@classmethod
def lowerCamelCase__ (cls : str ) -> Optional[Any]:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCamelCase__ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCAmelCase )
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Dict ) -> List[Any]:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Union[mock.Mock, List[mock.Mock]] ) -> int:
"""simple docstring"""
lowercase__ = mocks if isinstance(_UpperCAmelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def UpperCamelCase ( __magic_name__ : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = AcceleratorState()
lowercase__ = tensor[None].clone().to(state.device )
lowercase__ = gather(__magic_name__ ).cpu()
lowercase__ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __magic_name__ ):
return False
return True
class A :
'''simple docstring'''
def __init__(self : Any , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = returncode
lowercase__ = stdout
lowercase__ = stderr
async def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
while True:
lowercase__ = await stream.readline()
if line:
callback(__magic_name__ )
else:
break
async def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : Dict=None , __magic_name__ : List[Any]=None , __magic_name__ : Tuple=None , __magic_name__ : int=False , __magic_name__ : Any=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(__magic_name__ ) )
lowercase__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__magic_name__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__magic_name__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ = []
lowercase__ = []
def tee(__magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Any="" ):
lowercase__ = line.decode("""utf-8""" ).rstrip()
sink.append(__magic_name__ )
if not quiet:
print(__magic_name__ , __magic_name__ , file=__magic_name__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=__magic_name__ , )
return _RunOutput(await p.wait() , __magic_name__ , __magic_name__ )
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : str=None , __magic_name__ : Dict=None , __magic_name__ : Tuple=180 , __magic_name__ : str=False , __magic_name__ : Any=True ) -> _RunOutput:
"""simple docstring"""
lowercase__ = asyncio.get_event_loop()
lowercase__ = loop.run_until_complete(
_stream_subprocess(__magic_name__ , env=__magic_name__ , stdin=__magic_name__ , timeout=__magic_name__ , quiet=__magic_name__ , echo=__magic_name__ ) )
lowercase__ = """ """.join(__magic_name__ )
if result.returncode > 0:
lowercase__ = """\n""".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
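

# Hedged, self-contained sketch of the streaming pattern above: spawn a trivial
# Python subprocess and read its stdout line by line. The command and its
# output are illustrative only.
async def _echo_sketch():
    _proc = await asyncio.create_subprocess_exec(
        sys.executable, "-c", "print('hello')", stdout=asyncio.subprocess.PIPE
    )
    _lines = []
    while True:
        _line = await _proc.stdout.readline()
        if not _line:
            break
        _lines.append(_line.decode("utf-8").rstrip())
    await _proc.wait()
    return _lines


if __name__ == "__main__":
    assert asyncio.get_event_loop().run_until_complete(_echo_sketch()) == ["hello"]
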
class A ( UpperCAmelCase__ ):
'''simple docstring'''
pass
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[int]=False ) -> Dict:
"""simple docstring"""
try:
lowercase__ = subprocess.check_output(__magic_name__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__magic_name__ , """decode""" ):
lowercase__ = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(__magic_name__ )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 15 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
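
# Hedged, toy illustration of the lazy-import mechanism used above: a PEP 562
# module-level __getattr__ defers the submodule import until first attribute
# access. transformers' _LazyModule achieves this by subclassing ModuleType;
# the mapping below is illustrative, not the real implementation.
import importlib


def __getattr__(name):
    _lazy_map = {"GPTNeoConfig": "configuration_gpt_neo"}
    if name in _lazy_map:
        return getattr(importlib.import_module("." + _lazy_map[name], __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
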
| 29 | 0 |
from __future__ import annotations
def __a ( A__ : list[int | str] ):
create_state_space_tree(A__ , [] , 0 , [0 for i in range(len(A__ ) )] )
def __a ( A__ : list[int | str] , A__ : list[int | str] , A__ : int , A__ : list[int] , ):
if index == len(A__ ):
print(A__ )
return
for i in range(len(A__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
SCREAMING_SNAKE_CASE = True
create_state_space_tree(A__ , A__ , index + 1 , A__ )
current_sequence.pop()
SCREAMING_SNAKE_CASE = False
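

# Hedged cross-check of the backtracking above: for a length-n sequence the
# state-space tree has exactly n! leaves, as the standard library confirms.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 4 * 3 * 2 * 1
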
__A : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__A : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
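

# Hedged, self-contained sanity check: an equivalent odd-only Sieve of
# Eratosthenes reproduces the primes below 30. The bound is illustrative.
_is_prime = [True] * 30
_is_prime[0] = _is_prime[1] = False
for _i in range(4, 30, 2):
    _is_prime[_i] = False
for _i in range(3, int(30**0.5) + 1, 2):
    for _j in range(_i * _i, 30, 2 * _i):
        _is_prime[_j] = False
assert [_p for _p in range(30) if _is_prime[_p]] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
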
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
        # Get numbers in the window divisible by lps (the lower prime square root, last_prime)
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
        # Add the numbers divisible by ups (the upper prime square root, next_prime)
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
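

# Hedged, simplified worked example of the inclusion-exclusion above, for the
# prime pair (2, 3) with squares (4, 9): in (4, 9], 6 and 8 are divisible by 2,
# and 6 and 9 by 3; 6 is divisible by both and drops out, leaving 8 + 9 = 17.
assert sum(_n for _n in range(5, 10) if (_n % 2 == 0) != (_n % 3 == 0)) == 17
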
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
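

# Hedged worked example for the 1% tolerance helper above: 2_351_563 vs
# 2_355_000 bytes differ by roughly 0.15%, well within tolerance. The byte
# counts are illustrative.
assert abs(2_351_563 - 2_355_000) / 2_355_000 < 0.01
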
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
| 29 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> str:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ) -> Optional[int]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = BioGptModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Tuple:
_lowerCAmelCase = BioGptForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = BioGptModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# create attention mask
_lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_lowerCAmelCase )
_lowerCAmelCase = self.seq_length // 2
_lowerCAmelCase = 0
# first forward pass
_lowerCAmelCase , _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ).to_tuple()
        # create a hypothetical next token and extend next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_lowerCAmelCase = ids_tensor((1,) , _lowerCAmelCase ).item() + 1
_lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_lowerCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_lowerCAmelCase )] , dim=1 , )
# get two different outputs
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )["last_hidden_state"]
_lowerCAmelCase = model(_lowerCAmelCase , past_key_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )["last_hidden_state"]
# select random slice
_lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Dict:
_lowerCAmelCase = BioGptModel(config=_lowerCAmelCase ).to(_lowerCAmelCase ).eval()
_lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_lowerCAmelCase )
# first forward pass
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
_lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )["last_hidden_state"]
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[
"last_hidden_state"
]
# select random slice
_lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[int]:
_lowerCAmelCase = BioGptForCausalLM(_lowerCAmelCase )
model.to(_lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _snake_case ( self , _lowerCAmelCase , *_lowerCAmelCase ) -> int:
_lowerCAmelCase = BioGptModel(_lowerCAmelCase )
_lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = BioGptForTokenClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Optional[int] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__lowerCamelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : str = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : int = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        # pad on the left so the last token of every row is a real token during generation
        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # shrink the shorter sequence's generation budget by the number of pad tokens,
        # so the single-sentence run produces as many new tokens as the batched run
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
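# Illustrative note (comments only, not executed): the past-key-values tests above
# validate the cached incremental-decoding pattern, which in user code looks roughly like:
#     out = model(input_ids, use_cache=True)
#     past = out.past_key_values                   # per-layer key/value cache
#     next_id = out.logits[:, -1:].argmax(-1)      # greedy choice of the next token
#     step = model(next_id, past_key_values=past)  # feed only the new token, reuse the cache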
| 18 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is addressed by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. An empirical power-law relationship between language-model perplexity and word error rate has also been observed.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                # jiwer expects (truth, hypothesis)
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
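# Self-check sketch (illustrative; mirrors the docstring example above). Note that the
# iterative path pools substitution/deletion/insertion counts over ALL reference words,
# so the result is a corpus-level rate, not the mean of the per-pair WERs.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    refs = ["this is the reference", "there is another one"]
    incorrect, total = 0, 0
    for pred, ref in zip(preds, refs):
        measures = compute_measures(ref, pred)  # jiwer expects (truth, hypothesis)
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total)  # 0.5, matching the docstring example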
| 29 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only interface to the files of a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                # cache one "file" entry per sibling, keyed by its path inside the repo
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # also cache every parent directory of the file (all but the root)
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
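# Usage sketch (illustrative only): the repo id below is an arbitrary public dataset,
# network access is assumed, and `HfApi.dataset_info` comes from `huggingface_hub`.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("glue")  # any public dataset repo id works here
    fs = HfFileSystem(repo_info=repo_info)
    print(fs.ls(""))  # top-level entries reconstructed from repo_info.siblings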
| 19 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses the order of words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |