Dataset schema (one row per column: name, dtype, and the minimum/maximum observed value or string length):

| column                    | dtype   | min | max   |
|---------------------------|---------|-----|-------|
| `code`                    | string (lengths) | 81  | 54k   |
| `code_codestyle`          | int64   | 0   | 721   |
| `style_context`           | string (lengths) | 91  | 41.9k |
| `style_context_codestyle` | int64   | 0   | 699   |
| `label`                   | int64   | 0   | 1     |
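For orientation, a minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier is not given in this dump, so `"user/code-style-pairs"` below is a hypothetical placeholder:

```python
# Minimal sketch, assuming the rows below live in a Hugging Face dataset with
# the schema above. "user/code-style-pairs" is a hypothetical placeholder for
# the real (unnamed) dataset identifier.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), len(row["style_context"]))            # string lengths
print(row["code_codestyle"], row["style_context_codestyle"])  # style ids
print(row["label"])  # 0 or 1; its exact meaning is not stated in this dump
```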
Row 1

code (code_codestyle = 710):

```python
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


__lowerCAmelCase : Any = logging.get_logger(__name__)


class A ( UpperCAmelCase__ ):
    def __init__( self : Dict , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> None:
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' ,
            __lowerCAmelCase ,
        )
        super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
import requests
from bsa import BeautifulSoup


def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ):
    """simple docstring"""
    __UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    __UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' )
    __UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
```

label: 0
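Both snippets above show the same kind of identifier noise: placeholder names such as `__UpperCAmelCase`, `UpperCamelCase__`, and `snake_case__`, often inconsistent between a definition and its later uses (e.g. `bsa` where `bs4` is meant, or `stock_price` called but never defined under that name). This looks machine-generated. Below is a toy token-level renamer in the same spirit; it is only a guess at the flavor of transform behind these rows, not the dataset's actual generation code:

```python
# Toy identifier obfuscator: renames every non-keyword NAME token to a
# rotating placeholder. Placeholder collisions are possible; this is a
# sketch of the visible pattern, not the dataset's real pipeline.
import io
import keyword
import tokenize

PLACEHOLDERS = ["__UpperCAmelCase", "UpperCamelCase__", "snake_case__", "lowerCAmelCase"]

def obfuscate(source: str) -> str:
    mapping: dict[str, str] = {}
    out = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        text = tok.string
        if tok.type == tokenize.NAME and not keyword.iskeyword(text):
            mapping.setdefault(text, PLACEHOLDERS[len(mapping) % len(PLACEHOLDERS)])
            text = mapping[text]
        out.append((tok.type, text))
    return tokenize.untokenize(out)

print(obfuscate("def stock_price(symbol):\n    return symbol.upper()\n"))
```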
Row 2

code (code_codestyle = 711):

```python
'''simple docstring'''


class A :
    def __init__( self : List[Any] ) -> Union[str, Any]:
        __UpperCAmelCase = {}  # Mapping from char to TrieNode
        __UpperCAmelCase = False

    def snake_case__ ( self : Optional[Any] , __a : Optional[Any] ) -> str:
        for word in words:
            self.insert(UpperCAmelCase_ )

    def snake_case__ ( self : Optional[Any] , __a : Union[str, Any] ) -> Any:
        __UpperCAmelCase = self
        for char in word:
            if char not in curr.nodes:
                __UpperCAmelCase = TrieNode()
            __UpperCAmelCase = curr.nodes[char]
        __UpperCAmelCase = True

    def snake_case__ ( self : Union[str, Any] , __a : Any ) -> List[Any]:
        __UpperCAmelCase = self
        for char in word:
            if char not in curr.nodes:
                return False
            __UpperCAmelCase = curr.nodes[char]
        return curr.is_leaf

    def snake_case__ ( self : str , __a : str ) -> int:
        def _delete(__a : Optional[int] , __a : Optional[int] , __a : Dict ) -> bool:
            if index == len(UpperCAmelCase_ ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                __UpperCAmelCase = False
                return len(curr.nodes ) == 0
            __UpperCAmelCase = word[index]
            __UpperCAmelCase = curr.nodes.get(UpperCAmelCase_ )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            __UpperCAmelCase = _delete(UpperCAmelCase_ , UpperCAmelCase_ , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr

        _delete(self , UpperCAmelCase_ , 0 )


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
    """simple docstring"""
    if node.is_leaf:
        print(_snake_case , end=''' ''' )
    for key, value in node.nodes.items():
        print_words(_snake_case , word + key )


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split()
    __UpperCAmelCase = TrieNode()
    root.insert_many(_snake_case )
    # print_words(root, "")
    assert all(root.find(_snake_case ) for word in words )
    assert root.find('''banana''' )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    assert root.find('''apple''' )
    assert root.find('''all''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True


def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
    """simple docstring"""
    print(str(_snake_case ) , '''works!''' if passes else '''doesn\'t work :(''' )


def lowerCAmelCase ( ):
    """simple docstring"""
    assert test_trie()


def lowerCAmelCase ( ):
    """simple docstring"""
    print_results('''Testing trie functionality''' , test_trie() )


if __name__ == "__main__":
    main()
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
from __future__ import annotations

from statistics import mean


def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
    """simple docstring"""
    __UpperCAmelCase = [0] * no_of_processes
    __UpperCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(UpperCamelCase__ ):
        __UpperCAmelCase = burst_time[i]
    __UpperCAmelCase = []
    __UpperCAmelCase = 0
    __UpperCAmelCase = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        __UpperCAmelCase = []
        __UpperCAmelCase = -1
        for i in range(UpperCamelCase__ ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(UpperCamelCase__ )
        if len(UpperCamelCase__ ) > 0:
            __UpperCAmelCase = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    __UpperCAmelCase = i
            total_time += burst_time[target_process]
            completed += 1
            __UpperCAmelCase = 0
            __UpperCAmelCase = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
    """simple docstring"""
    __UpperCAmelCase = [0] * no_of_processes
    for i in range(UpperCamelCase__ ):
        __UpperCAmelCase = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    __lowerCAmelCase : List[Any] = 4
    __lowerCAmelCase : List[Any] = [2, 5, 3, 7]
    __lowerCAmelCase : Tuple = [0, 0, 0, 0]
    __lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __lowerCAmelCase : Dict = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
```

label: 0
Row 3

code (code_codestyle = 712):

```python
'''simple docstring'''
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


__lowerCAmelCase : Optional[Any] = ['text', 'image', 'audio']


def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] ):
    """simple docstring"""
    __UpperCAmelCase = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('''Text input''' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((5_1_2, 5_1_2) )
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_0_0_0 ) )
        elif isinstance(_lowercase , _lowercase ):
            inputs.append(create_inputs(_lowercase ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs


def lowerCAmelCase ( UpperCamelCase__ : Dict ):
    """simple docstring"""
    __UpperCAmelCase = []
    for output in outputs:
        if isinstance(_lowercase , (str, AgentText) ):
            output_types.append('''text''' )
        elif isinstance(_lowercase , (Image.Image, AgentImage) ):
            output_types.append('''image''' )
        elif isinstance(_lowercase , (torch.Tensor, AgentAudio) ):
            output_types.append('''audio''' )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types


@is_tool_test
class A :
    def snake_case__ ( self : Union[str, Any] ) -> Any:
        self.assertTrue(hasattr(self.tool , '''inputs''' ) )
        self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        __UpperCAmelCase = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , UpperCamelCase__ ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        __UpperCAmelCase = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def snake_case__ ( self : Any ) -> List[str]:
        __UpperCAmelCase = create_inputs(self.tool.inputs )
        __UpperCAmelCase = self.tool(*UpperCamelCase__ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            __UpperCAmelCase = [outputs]
        self.assertListEqual(output_types(UpperCamelCase__ ) , self.tool.outputs )

    def snake_case__ ( self : Any ) -> int:
        self.assertTrue(hasattr(self.tool , '''description''' ) )
        self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
        self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )

    def snake_case__ ( self : Any ) -> Optional[int]:
        __UpperCAmelCase = create_inputs(self.tool.inputs )
        __UpperCAmelCase = self.tool(*UpperCamelCase__ )
        if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            __UpperCAmelCase = [outputs]
        self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) )
        for output, output_type in zip(UpperCamelCase__ , self.tool.outputs ):
            __UpperCAmelCase = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )

    def snake_case__ ( self : str ) -> Tuple:
        __UpperCAmelCase = create_inputs(self.tool.inputs )
        __UpperCAmelCase = []
        for _input, input_type in zip(UpperCamelCase__ , self.tool.inputs ):
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        __UpperCAmelCase = self.tool(*UpperCamelCase__ )
        if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            __UpperCAmelCase = [outputs]
        self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) )
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class A ( metaclass=UpperCAmelCase ):
    a_ = ['''torch''']

    def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
        requires_backends(self , ['''torch'''] )

    @classmethod
    def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
        requires_backends(cls , ['''torch'''] )


# ... the file continues with dozens more dummy classes of exactly this shape
# (each `class A ( metaclass=UpperCAmelCase )` with `a_ = ['''torch''']`, an
# `__init__`, and two `snake_case__` classmethods, all delegating to
# `requires_backends(... , ['''torch'''] )`), interleaved with several
# module-level functions of the form:


def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
    """simple docstring"""
    requires_backends(UpperCamelCase__ , ['''torch'''] )
```

label: 0
Row 4

code (code_codestyle = 713):

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__lowerCAmelCase : str = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : int = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
```

label: 0
Row 5

code (code_codestyle = 714):

```python
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowerCAmelCase : Optional[Any] = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Union[str, Any] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

label: 0
Row 6

code (code_codestyle = 715):

```python
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

__lowerCAmelCase : Optional[Any] = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class A ( lowercase__ ):
    a_ = """roformer"""

    def __init__( self : Optional[int] , __a : Union[str, Any]=5_0_0_0_0 , __a : Tuple=None , __a : Union[str, Any]=7_6_8 , __a : Union[str, Any]=1_2 , __a : Any=1_2 , __a : int=3_0_7_2 , __a : Tuple="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Union[str, Any]=1_5_3_6 , __a : List[Any]=2 , __a : Optional[int]=0.0_2 , __a : List[Any]=1e-12 , __a : int=0 , __a : List[Any]=False , __a : Any=True , **__a : Dict , ) -> Optional[int]:
        super().__init__(pad_token_id=__lowercase , **__lowercase )
        __UpperCAmelCase = vocab_size
        __UpperCAmelCase = hidden_size if embedding_size is None else embedding_size
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = hidden_act
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = max_position_embeddings
        __UpperCAmelCase = type_vocab_size
        __UpperCAmelCase = initializer_range
        __UpperCAmelCase = layer_norm_eps
        __UpperCAmelCase = rotary_value
        __UpperCAmelCase = use_cache


class A ( lowercase__ ):
    @property
    def snake_case__ ( self : Dict ) -> List[Any]:
        if self.task == "multiple-choice":
            __UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
        __UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ]
        )
```
style_context (style_context_codestyle = 654):

```python
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
    """simple docstring"""
    __UpperCAmelCase = {}
    if train_file is not None:
        __UpperCAmelCase = [train_file]
    if eval_file is not None:
        __UpperCAmelCase = [eval_file]
    if test_file is not None:
        __UpperCAmelCase = [test_file]
    __UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
    __UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
    __UpperCAmelCase = features_name.pop(UpperCamelCase__ )
    __UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
    __UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
    __UpperCAmelCase = tokenizer.model_input_names
    __UpperCAmelCase = {}
    # ... (continues in the source: batch tokenization of the one- or
    # two-column text features, gen_train/gen_val/gen_test generators, and
    # construction of the train/val/test datasets via
    # tf.data.Dataset.from_generator with cardinality assertions) ...
    return train_ds, val_ds, test_ds, labelaid


__lowerCAmelCase : List[Any] = logging.getLogger(__name__)


@dataclass
class A :
    a_ = field(metadata={'''help''': '''Which column contains the label'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
    a_ = field(
        default=1_2_8 ,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } ,
    )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


@dataclass
class A :
    a_ = field(metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    a_ = field(
        default=UpperCAmelCase ,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,
    )


def lowerCAmelCase ( ):
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
    # ... (continues in the source: output-dir overwrite check, logging setup,
    # tokenizer/dataset/config/model loading, compute_metrics based on
    # np.argmax over predictions, TFTrainer construction, training, and
    # evaluation with results written to eval_results.txt) ...
    return results


if __name__ == "__main__":
    main()
```

label: 0
Row 7

code (code_codestyle = 716):

```python
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class A ( __snake_case , unittest.TestCase ):
    a_ = KandinskyVaaControlnetPipeline
    a_ = ['image_embeds', 'negative_image_embeds', 'hint']
    a_ = ['image_embeds', 'negative_image_embeds', 'hint']
    a_ = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    a_ = False

    # ... (continues in the source: dimension properties, a dummy
    # UNetaDConditionModel and VQModel, get_dummy_components with a
    # DDIMScheduler, get_dummy_inputs building image_embeds/hint tensors via
    # floats_tensor, and a fast test comparing a 64x64 output slice against a
    # hard-coded expected_slice within 1e-2) ...


@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def snake_case__ ( self : str ) -> List[Any]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # ... (continues in the source: a slow integration test that loads the
    # kandinsky-community prior and controlnet-depth pipelines, generates
    # "A robot, 4k photo" from a depth hint image, and checks the 512x512
    # output against a reference via assert_mean_pixel_difference) ...
```
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class A : def __init__( self : List[Any] , __a : Any , ) -> Dict: __UpperCAmelCase = parent __UpperCAmelCase = 1_3 __UpperCAmelCase = 7 __UpperCAmelCase = True __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = 9_9 __UpperCAmelCase = 3_2 __UpperCAmelCase = 2 __UpperCAmelCase = 4 __UpperCAmelCase = 3_7 __UpperCAmelCase = '''gelu''' __UpperCAmelCase = 0.1 __UpperCAmelCase = 0.1 __UpperCAmelCase = 5_1_2 __UpperCAmelCase = 1_6 __UpperCAmelCase = 2 __UpperCAmelCase = 0.0_2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 __UpperCAmelCase = None def snake_case__ ( self : Optional[int] ) -> Dict: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any: __UpperCAmelCase = TFDistilBertModel(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) __UpperCAmelCase = [input_ids, input_mask] __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int: __UpperCAmelCase = TFDistilBertForMaskedLM(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict: __UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a ) 
__UpperCAmelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForSequenceClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str: __UpperCAmelCase = self.num_choices __UpperCAmelCase = TFDistilBertForMultipleChoice(__a ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForTokenClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : str ) -> Any: __UpperCAmelCase = self.prepare_config_and_inputs() ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): a_ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) a_ = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) a_ = False a_ = False def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = TFDistilBertModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 ) def snake_case__ ( self : List[Any] ) -> Optional[int]: self.config_tester.run_common_tests() def snake_case__ ( self : Any ) -> str: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__a ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__a ) def snake_case__ ( self : Union[str, Any] ) -> Any: __UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__a ) def snake_case__ ( self : Optional[Any] ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a ) def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__a ) @slow def snake_case__ ( self : Dict ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCAmelCase = TFDistilBertModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_tf class A ( unittest.TestCase ): @slow def snake_case__ ( self : int ) -> Dict: __UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase = model(__a )[0] __UpperCAmelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , __a ) __UpperCAmelCase = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
654
0
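Aside: the generated-image checks in the pipeline test above boil down to comparing a 3x3 corner slice of the output against a stored reference within an absolute tolerance. A minimal, self-contained sketch of that slice-comparison idea, assuming only NumPy (the function and array names here are illustrative, not part of the sample above):

import numpy as np

def slice_close(image, expected_slice, atol=1e-2):
    # compare the bottom-right 3x3 patch of the last channel, as the test does
    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - expected_slice).max() < atol)

rng = np.random.default_rng(0)
image = rng.random((1, 64, 64, 3))
reference = image[0, -3:, -3:, -1].flatten().copy()
assert slice_close(image, reference)            # identical slice passes
assert not slice_close(image, reference + 0.5)  # a shifted reference fails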
'''simple docstring''' from __future__ import annotations import math def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list ): """simple docstring""" if len(UpperCamelCase__ ) != 2 or len(a[0] ) != 2 or len(UpperCamelCase__ ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) __UpperCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list ): """simple docstring""" return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase__ ) ) ] def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list ): """simple docstring""" return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase__ ) ) ] def lowerCAmelCase ( UpperCamelCase__ : list ): """simple docstring""" if len(UpperCamelCase__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) __UpperCAmelCase = len(UpperCamelCase__ ) __UpperCAmelCase = matrix_length // 2 __UpperCAmelCase = [[a[i][j] for j in range(UpperCamelCase__ , UpperCamelCase__ )] for i in range(UpperCamelCase__ )] __UpperCAmelCase = [ [a[i][j] for j in range(UpperCamelCase__ , UpperCamelCase__ )] for i in range(UpperCamelCase__ , UpperCamelCase__ ) ] __UpperCAmelCase = [[a[i][j] for j in range(UpperCamelCase__ )] for i in range(UpperCamelCase__ )] __UpperCAmelCase = [[a[i][j] for j in range(UpperCamelCase__ )] for i in range(UpperCamelCase__ , UpperCamelCase__ )] return top_left, top_right, bot_left, bot_right def lowerCAmelCase ( UpperCamelCase__ : list ): """simple docstring""" return len(UpperCamelCase__ ), len(matrix[0] ) def lowerCAmelCase ( UpperCamelCase__ : list ): """simple docstring""" print('''\n'''.join(str(UpperCamelCase__ ) for line in matrix ) ) def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list ): """simple docstring""" if matrix_dimensions(UpperCamelCase__ ) == (2, 2): return default_matrix_multiplication(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = split_matrix(UpperCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = split_matrix(UpperCamelCase__ ) __UpperCAmelCase = actual_strassen(UpperCamelCase__ , matrix_subtraction(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = actual_strassen(matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ ) __UpperCAmelCase = actual_strassen(matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ ) __UpperCAmelCase = actual_strassen(UpperCamelCase__ , matrix_subtraction(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = actual_strassen(matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) , matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = actual_strassen(matrix_subtraction(UpperCamelCase__ , UpperCamelCase__ ) , matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = actual_strassen(matrix_subtraction(UpperCamelCase__ , UpperCamelCase__ ) , matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ ) , UpperCamelCase__ ) __UpperCAmelCase = matrix_addition(UpperCamelCase__ , 
UpperCamelCase__ ) __UpperCAmelCase = matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ ) , UpperCamelCase__ ) # construct the new matrix from our 4 quadrants __UpperCAmelCase = [] for i in range(len(UpperCamelCase__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(UpperCamelCase__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list ): """simple docstring""" if matrix_dimensions(UpperCamelCase__ )[1] != matrix_dimensions(UpperCamelCase__ )[0]: __UpperCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' f"""Matrix A: {matrixa}\n""" f"""Matrix B: {matrixa}""" ) raise Exception(UpperCamelCase__ ) __UpperCAmelCase = matrix_dimensions(UpperCamelCase__ ) __UpperCAmelCase = matrix_dimensions(UpperCamelCase__ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __UpperCAmelCase = max(*UpperCamelCase__ , *UpperCamelCase__ ) __UpperCAmelCase = int(math.pow(2 , math.ceil(math.loga(UpperCamelCase__ ) ) ) ) __UpperCAmelCase = matrixa __UpperCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCamelCase__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __UpperCAmelCase = actual_strassen(UpperCamelCase__ , UpperCamelCase__ ) # Removing the additional zeros for i in range(0 , UpperCamelCase__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __lowerCAmelCase : Tuple = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __lowerCAmelCase : Any = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
717
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __lowerCAmelCase : List[Any] = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
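Aside: the Strassen sample above recurses down to a seven-multiplication 2x2 base case. As a sanity check of the underlying identity, here is one 2x2 Strassen step in plain Python, cross-checked against the naive eight-multiplication product (an illustrative sketch, not the sample's own helpers):

def strassen_2x2(a, b):
    # the seven Strassen products for a single 2x2 step
    m1 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    m2 = (a[1][0] + a[1][1]) * b[0][0]
    m3 = a[0][0] * (b[0][1] - b[1][1])
    m4 = a[1][1] * (b[1][0] - b[0][0])
    m5 = (a[0][0] + a[0][1]) * b[1][1]
    m6 = (a[1][0] - a[0][0]) * (b[0][0] + b[0][1])
    m7 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    return [
        [m1 + m4 - m5 + m7, m3 + m5],
        [m2 + m4, m1 - m2 + m3 + m6],
    ]

def naive_2x2(a, b):
    # the direct eight-multiplication product, mirroring the sample's base case
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]

a = [[2, 3], [6, 4]]
b = [[0, 2], [16, 2]]
assert strassen_2x2(a, b) == naive_2x2(a, b)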
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A ( UpperCAmelCase__ , unittest.TestCase ): a_ = ShapEImgaImgPipeline a_ = ["image"] a_ = ["image"] a_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] a_ = False @property def snake_case__ ( self : Any ) -> List[str]: return 3_2 @property def snake_case__ ( self : str ) -> str: return 3_2 @property def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: return self.time_input_dim * 4 @property def snake_case__ ( self : Tuple ) -> Optional[int]: return 8 @property def snake_case__ ( self : Tuple ) -> Any: torch.manual_seed(0 ) __UpperCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __UpperCAmelCase = CLIPVisionModel(lowerCamelCase__ ) return model @property def snake_case__ ( self : Optional[Any] ) -> Tuple: __UpperCAmelCase = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_2_4 , ) return image_processor @property def snake_case__ ( self : Optional[Any] ) -> List[str]: torch.manual_seed(0 ) __UpperCAmelCase = { "num_attention_heads": 2, "attention_head_dim": 1_6, "embedding_dim": self.time_input_dim, "num_embeddings": 3_2, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase = PriorTransformer(**lowerCamelCase__ ) return model @property def snake_case__ ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __UpperCAmelCase = { "param_shapes": ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 1_2, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase = ShapERenderer(**lowerCamelCase__ ) return model def snake_case__ ( self : int ) -> List[str]: __UpperCAmelCase = self.dummy_prior __UpperCAmelCase = self.dummy_image_encoder __UpperCAmelCase = self.dummy_image_processor __UpperCAmelCase = self.dummy_renderer __UpperCAmelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1_0_2_4 , prediction_type='''sample''' , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , ) __UpperCAmelCase = { "prior": prior, "image_encoder": image_encoder, "image_processor": image_processor, "renderer": renderer, "scheduler": scheduler, } return components def snake_case__ ( self : List[str] , 
__a : Optional[Any] , __a : Optional[int]=0 ) -> Optional[Any]: __UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(lowerCamelCase__ ) else: __UpperCAmelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCAmelCase = { "image": input_image, "generator": generator, "num_inference_steps": 1, "frame_size": 3_2, "output_type": "np", } return inputs def snake_case__ ( self : List[str] ) -> List[str]: __UpperCAmelCase = "cpu" __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = self.pipeline_class(**lowerCamelCase__ ) __UpperCAmelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) __UpperCAmelCase = output.images[0] __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) __UpperCAmelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__ ( self : Optional[Any] ) -> str: self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = torch_device == "cpu" __UpperCAmelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , ) def snake_case__ ( self : int ) -> List[Any]: __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = self.pipeline_class(**lowerCamelCase__ ) __UpperCAmelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCAmelCase = 1 __UpperCAmelCase = 2 __UpperCAmelCase = self.get_dummy_inputs(lowerCamelCase__ ) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase = batch_size * [inputs[key]] __UpperCAmelCase = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A ( unittest.TestCase ): def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Optional[int] ) -> Optional[Any]: __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __UpperCAmelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __UpperCAmelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCAmelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) __UpperCAmelCase = pipe( lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='''np''' , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
718
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


class A ( UpperCAmelCase ):
    a_ = '''bert-generation'''

    def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
        __UpperCAmelCase = vocab_size
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = hidden_act
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = max_position_embeddings
        __UpperCAmelCase = initializer_range
        __UpperCAmelCase = layer_norm_eps
        __UpperCAmelCase = position_embedding_type
        __UpperCAmelCase = use_cache
654
0
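Aside: the pipeline test in the record above seeds every dummy tensor with random.Random(seed) so that repeated runs produce identical inputs. A tiny stand-in (standard library only; the helper name is illustrative) showing why the seeding makes the dummies reproducible:

import random

def dummy_values(seed, dim=32):
    # deterministic stand-in for floats_tensor(..., rng=random.Random(seed))
    rng = random.Random(seed)
    return [rng.uniform(-1.0, 1.0) for _ in range(dim)]

assert dummy_values(0) == dummy_values(0)  # same seed, same values
assert dummy_values(0) != dummy_values(1)  # a different seed changes them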
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : int = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A ( __a , unittest.TestCase ): a_ = XLNetTokenizer a_ = XLNetTokenizerFast a_ = True a_ = True def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase = XLNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : int ) -> List[str]: __UpperCAmelCase = '''<s>''' __UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(lowerCAmelCase_ ) , 1_0_0_6 ) def snake_case__ ( self : Optional[Any] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def snake_case__ ( self : List[Any] ) -> List[str]: __UpperCAmelCase = XLNetTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : str ) -> Optional[int]: __UpperCAmelCase = XLNetTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', 
'''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def snake_case__ ( self : int ) -> Dict: __UpperCAmelCase = XLNetTokenizer(lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def snake_case__ ( self : Optional[int] ) -> Optional[int]: __UpperCAmelCase = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def snake_case__ ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
719
'''simple docstring''' from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) __lowerCAmelCase : str = 299_792_458 # Symbols __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z") def lowerCAmelCase ( UpperCamelCase__ : float ): """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def lowerCAmelCase ( UpperCamelCase__ : float ): """simple docstring""" return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 ) def lowerCAmelCase ( UpperCamelCase__ : float ): """simple docstring""" return np.array( [ [gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0], [-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ): """simple docstring""" # Ensure event is not empty if event is None: __UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(UpperCamelCase__ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: __lowerCAmelCase : Dict = transform(29_979_245) print("Example of four vector: ") print(F"""ct' = {four_vector[0]}""") print(F"""x' = {four_vector[1]}""") print(F"""y' = {four_vector[2]}""") print(F"""z' = {four_vector[3]}""") # Substitute symbols with numerical values __lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1} __lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)] print(F"""\n{numerical_vector}""")
654
0
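Aside: the special-relativity sample above computes beta = v/c and the Lorentz factor gamma = 1/sqrt(1 - beta^2). A self-contained recomputation with plain math, checked at v = c/2 where gamma = 1/sqrt(0.75), about 1.1547 (a sketch; it deliberately does not reuse the sample's obfuscated names):

from math import sqrt

C = 299_792_458  # speed of light in m/s, as in the sample

def beta(velocity):
    return velocity / C

def gamma(velocity):
    return 1 / sqrt(1 - beta(velocity) ** 2)

assert abs(gamma(0.5 * C) - 1 / sqrt(0.75)) < 1e-12
assert round(gamma(0.5 * C), 4) == 1.1547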
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : bool = False ): """simple docstring""" if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable: raise ValueError( '''Warning: upper bound of deterministic test is exceeded. ''' '''Pass allow_probable=True to allow probabilistic test. ''' '''A return value of True indicates a probable prime.''' ) # array bounds provided by analysis __UpperCAmelCase = [ 2_0_4_7, 1_3_7_3_6_5_3, 2_5_3_2_6_0_0_1, 3_2_1_5_0_3_1_7_5_1, 2_1_5_2_3_0_2_8_9_8_7_4_7, 3_4_7_4_7_4_9_6_6_0_3_8_3, 3_4_1_5_5_0_0_7_1_7_2_8_3_2_1, 1, 3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1, 1, 1, 3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1, 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1, ] __UpperCAmelCase = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1] for idx, _p in enumerate(UpperCamelCase__ , 1 ): if n < _p: # then we have our last prime to check __UpperCAmelCase = primes[:idx] break __UpperCAmelCase = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: __UpperCAmelCase = False for r in range(UpperCamelCase__ ): __UpperCAmelCase = pow(UpperCamelCase__ , d * 2**r , UpperCamelCase__ ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): __UpperCAmelCase = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def lowerCAmelCase ( ): """simple docstring""" assert not miller_rabin(5_6_1 ) assert miller_rabin(5_6_3 ) # 2047 assert not miller_rabin(8_3_8_2_0_1 ) assert miller_rabin(8_3_8_2_0_7 ) # 1_373_653 assert not miller_rabin(1_7_3_1_6_0_0_1 ) assert miller_rabin(1_7_3_1_6_0_1_7 ) # 25_326_001 assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 ) assert miller_rabin(3_0_7_8_3_8_6_6_5_3 ) # 3_215_031_751 assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 ) assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 ) # 2_152_302_898_747 assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 ) assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 ) # 3_474_749_660_383 assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 ) assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 ) # 341_550_071_728_321 assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 ) assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 ) assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 ) assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
720
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : Any = tuple[int, int] class A : def __init__( self : Optional[int] ) -> int: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : Optional[Any] ) -> List[Any]: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Dict ) -> Optional[int]: return len(self.elements ) == 0 def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__a ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : int , __a : Any ) -> int: if item in self.set: self.set.remove(__a ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : List[str] ) -> Dict: return self.elements[0][1] def snake_case__ ( self : Any ) -> List[str]: ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__a ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # euclidean distance __UpperCAmelCase = np.array(UpperCamelCase__ ) __UpperCAmelCase = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): __UpperCAmelCase = '''*''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: 
print(UpperCamelCase__ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ): """simple docstring""" for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Dict = make_common_ground() __lowerCAmelCase : int = blocks_blk # hyper parameters __lowerCAmelCase : Dict = 1 __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Union[str, Any] = 20 __lowerCAmelCase : Any = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[Any] = (0, 0) __lowerCAmelCase : Any = (n - 1, n - 1) __lowerCAmelCase : Optional[int] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
654
0
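Aside: the Miller-Rabin sample above decomposes n - 1 as d * 2**s and then probes a fixed list of witness bases. Here is one self-contained witness round in plain Python; base 2 alone exposes the Carmichael number 561, which fools the plain Fermat test (illustrative helper, not part of the sample):

def is_witness(a, n):
    # write n - 1 as d * 2**s with d odd
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    x = pow(a, d, n)
    if x in (1, n - 1):
        return False  # a does not witness compositeness
    for _ in range(s - 1):
        x = pow(x, 2, n)
        if x == n - 1:
            return False
    return True  # n is certainly composite

assert is_witness(2, 561)      # 561 = 3 * 11 * 17
assert not is_witness(2, 563)  # 563 is prime, so no base can witness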
'''simple docstring'''
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch('''socket.socket''' )
@patch('''builtins.open''' )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Any ):
    """simple docstring"""
    __UpperCAmelCase = Mock()
    __UpperCAmelCase = conn, Mock()
    __UpperCAmelCase = iter([1, None] )
    __UpperCAmelCase = lambda UpperCamelCase__ : next(_UpperCamelCase )
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=_UpperCamelCase )
    # ===== assertions =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
721
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __lowerCAmelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __lowerCAmelCase : Optional[int] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( 
"zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): __UpperCAmelCase = None if _re_tf_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = tf_models __UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = flax_models __UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = pt_models __UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCAmelCase = True break # Try again after removing the last word in the name __UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] ) __UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCAmelCase = list(UpperCamelCase__ ) all_models.sort() __UpperCAmelCase = {'''model_type''': all_models} __UpperCAmelCase = [pt_models[t] for t in all_models] __UpperCAmelCase = [tf_models[t] for t in all_models] __UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCAmelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCAmelCase = '''AutoTokenizer''' __UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] __UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names __UpperCAmelCase = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" __UpperCAmelCase = get_frameworks_table() __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) __UpperCAmelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ ) __UpperCAmelCase = Dataset.from_json(UpperCamelCase__ ) __UpperCAmelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(UpperCamelCase__ ) ) } __UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCAmelCase = sorted(table.keys() ) __UpperCAmelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __UpperCAmelCase = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __UpperCAmelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS __UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: __UpperCAmelCase = pipeline_tasks[key]['''pt'''] if isinstance(UpperCamelCase__ , (list, tuple) ): __UpperCAmelCase = model[0] __UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ''', '''.join(UpperCamelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __lowerCAmelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
654
0
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Explore board placements row by row, recording every valid full board."""
    # Get the next row of the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board), so we have a complete solution.
    if row == n:
        # Convert possible_board, e.g. [1, 3, 0, 2], into its printable form:
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # Iterate over each column of the row to find all valid placements.
    for col in range(n):
        # First check that the column is not already used in the current board
        # (possible_board); a repeated value would mean a vertical collision.
        # Then apply the two diagonal formulas:
        #
        #   45°:  y - x = b   i.e.  row - col = b
        #   135°: y + x = b   i.e.  row + col = b
        #
        # and verify that neither result already appears in
        # diagonal_right_collisions or diagonal_left_collisions respectively.
        # If any of these checks fails there is a collision, so we skip to the
        # next column.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise recurse with the updated board and collision sets.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem and print every solution board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
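# Worked illustration (added for clarity; not part of the original snippet):
# for the partial board [1, 3], the queen in row 0 sits in column 1 and the
# queen in row 1 sits in column 3, so the collision sets are
#   diagonal_right_collisions = [0 - 1, 1 - 3] = [-1, -2]   (row - col)
#   diagonal_left_collisions  = [0 + 1, 1 + 3] = [1, 4]     (row + col)
# Placing a queen at (row=2, col=0) is then legal: 0 is not in [1, 3],
# 2 - 0 = 2 is not in [-1, -2], and 2 + 0 = 2 is not in [1, 4].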
700
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase : Optional[int] = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class A ( unittest.TestCase ): def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple: __UpperCAmelCase = None __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) __UpperCAmelCase = os.path.abspath('''examples''' ) for item in os.listdir(__a ): if item not in EXCLUDE_EXAMPLES: __UpperCAmelCase = os.path.join(__a , __a ) if os.path.isfile(__a ) and ".py" in item_path: with self.subTest( tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ): __UpperCAmelCase = compare_against_test( os.path.join(__a , __a ) , __a , __a , __a ) __UpperCAmelCase = '''\n'''.join(__a ) if special_strings is not None: for string in special_strings: __UpperCAmelCase = diff.replace(__a , '''''' ) self.assertEqual(__a , '''''' ) def snake_case__ ( self : Optional[Any] ) -> str: self.one_complete_example('''complete_nlp_example.py''' , __a ) self.one_complete_example('''complete_nlp_example.py''' , __a ) def snake_case__ ( self : List[str] ) -> Tuple: __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) __UpperCAmelCase = [ ''' ''' * 1_6 + '''{\n\n''', ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 2_0 + '''"epoch": epoch,\n\n''', ''' ''' * 1_6 + '''},\n\n''', ''' ''' * 1_6 + '''step=epoch,\n''', ''' ''' * 1_2, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class A ( UpperCAmelCase ): a_ = False @classmethod def snake_case__ ( cls : Tuple ) -> str: super().setUpClass() __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) __UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Dict ) -> int: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) if torch.cuda.is_available(): __UpperCAmelCase = torch.cuda.device_count() else: __UpperCAmelCase = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) else: self.assertIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) @slow def snake_case__ ( self : Any ) -> Optional[Any]: __UpperCAmelCase = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) __UpperCAmelCase = re.findall('''({.+})''' , __a ) __UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1] __UpperCAmelCase = ast.literal_eval(__a ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case__ ( self : Dict ) -> int: __UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: __UpperCAmelCase = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case__ ( self : Tuple ) -> Optional[Any]: __UpperCAmelCase = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
654
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=UpperCAmelCase ) class A ( UpperCAmelCase ): a_ = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) a_ = Features({'''image''': Image()} ) a_ = Features({'''labels''': ClassLabel} ) a_ = "image" a_ = "labels" def snake_case__ ( self : str , __a : Optional[int] ) -> str: if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , __a ): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" ) __UpperCAmelCase = copy.deepcopy(self ) __UpperCAmelCase = self.label_schema.copy() __UpperCAmelCase = features[self.label_column] __UpperCAmelCase = label_schema return task_template @property def snake_case__ ( self : Tuple ) -> Dict[str, str]: return { self.image_column: "image", self.label_column: "labels", }
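# Usage sketch (added for illustration; the obfuscated class above corresponds
# to the upstream `datasets` ImageClassification task template, whose names —
# ImageClassification, align_with_features, column_mapping — are assumed here):
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
task = task.align_with_features(features)  # copies the real ClassLabel into label_schema
print(task.column_mapping)  # {'image': 'image', 'labels': 'labels'}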
701
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __lowerCAmelCase : Any = "" __lowerCAmelCase : int = "" __lowerCAmelCase : Union[str, Any] = "" __lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ ) print('''Processing...''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for index, image in enumerate(UpperCamelCase__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCAmelCase = random_chars(3_2 ) __UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" ) __UpperCAmelCase = [] for anno in new_annos[index]: __UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(UpperCamelCase__ ) with open(f"""/{file_root}.txt""" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ): """simple docstring""" __UpperCAmelCase = [] __UpperCAmelCase = [] for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ): __UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(UpperCamelCase__ ) as in_file: __UpperCAmelCase = in_file.readlines() __UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" ) __UpperCAmelCase = [] for obj_list in obj_lists: __UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(UpperCamelCase__ ) labels.append(UpperCamelCase__ ) return img_paths, labels def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ): """simple docstring""" __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = [] for idx in range(len(UpperCamelCase__ ) ): __UpperCAmelCase = [] __UpperCAmelCase = img_list[idx] path_list.append(UpperCamelCase__ ) __UpperCAmelCase = anno_list[idx] __UpperCAmelCase = cva.imread(UpperCamelCase__ ) if flip_type == 1: __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ ) for bbox in img_annos: __UpperCAmelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ ) for bbox in img_annos: __UpperCAmelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(UpperCamelCase__ ) new_imgs_list.append(UpperCamelCase__ ) return new_imgs_list, new_annos_lists, path_list def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" __UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) ) if __name__ == "__main__": main() print("DONE ✅")
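# Note (added; not part of the original snippet): the annotations are in YOLO
# format [class, x_center, y_center, width, height] with coordinates
# normalised to [0, 1], so a horizontal flip (flip_type == 1) only needs
# x_center -> 1 - x_center and a vertical flip (flip_type == 0) only needs
# y_center -> 1 - y_center; e.g. a box centred at x = 0.25 moves to x = 0.75
# while its width, height and y_center stay unchanged.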
654
0
'''simple docstring''' # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ): """simple docstring""" __UpperCAmelCase = multiprocessing.Manager() __UpperCAmelCase = manager.list() __UpperCAmelCase = multiprocessing.Process(target=lowercase__ , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('''timed out''' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str ): """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil __UpperCAmelCase = shutil.rmtree __UpperCAmelCase = os.rmdir __UpperCAmelCase = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: __UpperCAmelCase = {} with swallow_io(): with time_limit(lowercase__ ): exec(lowercase__ , lowercase__ ) result.append('''passed''' ) except TimeoutException: result.append('''timed out''' ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. __UpperCAmelCase = rmtree __UpperCAmelCase = rmdir __UpperCAmelCase = chdir @contextlib.contextmanager def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] ): """simple docstring""" def signal_handler(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ): raise TimeoutException('''Timed out!''' ) signal.setitimer(signal.ITIMER_REAL , lowercase__ ) signal.signal(signal.SIGALRM , lowercase__ ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = WriteOnlyStringIO() with contextlib.redirect_stdout(lowercase__ ): with contextlib.redirect_stderr(lowercase__ ): with redirect_stdin(lowercase__ ): yield @contextlib.contextmanager def lowerCAmelCase ( ): """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(lowercase__ ): yield dirname class A ( __snake_case ): pass class A ( io.StringIO ): def snake_case__ ( self : Union[str, Any] , *__a : str , **__a : Optional[int] ) -> Dict: raise OSError def snake_case__ ( self : Optional[int] , *__a : Optional[int] , **__a : int ) -> Dict: raise OSError def snake_case__ ( self : Optional[Any] , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]: raise OSError def snake_case__ ( self : Dict , *__a : List[Any] , **__a : Any ) -> Optional[Any]: return False class A ( contextlib._RedirectStream ): # type: ignore a_ = '''stdin''' @contextlib.contextmanager def lowerCAmelCase ( UpperCamelCase__ : List[Any] ): """simple docstring""" if root == ".": yield return __UpperCAmelCase = os.getcwd() os.chdir(lowercase__ ) try: yield except BaseException as exc: raise exc finally: os.chdir(lowercase__ ) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any]=None ): """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, 
maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins __UpperCAmelCase = None __UpperCAmelCase = None import os __UpperCAmelCase = '''1''' __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None import shutil __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None import subprocess __UpperCAmelCase = None # type: ignore __UpperCAmelCase = None import sys __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None
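# Note (added; not part of the original snippet): reliability_guard is a
# best-effort sandbox, not a security boundary. Each `... = None` assignment
# above originally disabled a destructive attribute of builtins, os, shutil,
# subprocess or sys (e.g. os.system, shutil.rmtree, subprocess.Popen), so that
# untrusted completions run through exec() cannot easily delete files, spawn
# processes or kill the interpreter; the obfuscation in this copy has dropped
# the attribute names but kept the assignments.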
702
'''simple docstring'''

from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
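# Usage sketch (added; `fire` exposes minify's parameters on the command line,
# the script filename is assumed):
#   python minify.py --src_dir data/full --dest_dir data/small --n 100
# keeps only the first 100 lines of every file under data/full, writing the
# truncated copies to data/small.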
654
0
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule __lowerCAmelCase : Optional[Any] = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): __UpperCAmelCase = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase__ ) if number < 1: __UpperCAmelCase = f"""Input value of [number={number}] must be > 0""" raise ValueError(UpperCamelCase__ ) __UpperCAmelCase = 1 for i in range(1 , UpperCamelCase__ ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
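# Worked check (added; not part of the original snippet — the obfuscated
# function is called `catalan` here for readability): the loop implements the
# recurrence C(i) = C(i-1) * (4*i - 2) // (i + 1), so the function returns the
# (number - 1)-th Catalan number:
#   catalan(1) = 1, catalan(2) = 1, catalan(3) = 2,
#   catalan(4) = 5, catalan(5) = 14, catalan(6) = 42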
654
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : str = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[str] = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase ( ): """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class A ( nn.Module ): def __init__( self : Optional[Any] ) -> int: super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A ( unittest.TestCase ): def snake_case__ ( self : Optional[int] ) -> Any: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : Union[str, Any] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Optional[int] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def snake_case__ ( self : Any ) -> int: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__a : Optional[int] ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : Any ) -> List[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Dict ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : List[Any] ) -> List[str]: @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(1_2_8 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def snake_case__ ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Tuple ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def snake_case__ ( self : Any ) -> List[Any]: __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) __UpperCAmelCase = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
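# Usage sketch (added for illustration; mirrors the decorator behaviour the
# tests above assert — the training body is hypothetical):
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size: int) -> int:
    # Pretend every batch size above 8 exhausts GPU memory; the decorator
    # catches the OOM-style RuntimeError and retries with batch_size halved
    # (128 -> 64 -> 32 -> 16 -> 8) until the body succeeds.
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size


assert train() == 8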
654
0
'''simple docstring'''

from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Create a linked list from the given list; raise if it is empty."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the list's elements in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
705
'''simple docstring'''

from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the factor used in Newton's forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # Build the forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
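# Worked example (added; not part of the original snippet): for x = [0, 1, 2, 3]
# with corresponding y-values [1, 2, 4, 8] and value = 2, we get u = 2 and the
# forward differences Δy0 = 1, Δ²y0 = 1, Δ³y0 = 1, so
#   f ≈ y0 + ucal(u, 1)*Δy0/1! + ucal(u, 2)*Δ²y0/2! + ucal(u, 3)*Δ³y0/3!
#     = 1 + 2*1 + (2*1/2)*1 + (2*1*0/6)*1 = 4
# which reproduces the tabulated point y(2) = 4, as expected.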
654
0
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( UpperCAmelCase , unittest.TestCase ): a_ = RobertaTokenizer a_ = RobertaTokenizerFast a_ = True a_ = {"cls_token": "<s>"} def snake_case__ ( self : int ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) ) __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __UpperCAmelCase = {'''unk_token''': '''<unk>'''} __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__a ) ) def snake_case__ ( self : Tuple , **__a : Optional[Any] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def snake_case__ ( self : Union[str, Any] , **__a : List[Any] ) -> List[str]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def snake_case__ ( self : str , __a : Union[str, Any] ) -> List[str]: __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = '''lower newer''' return input_text, output_text def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __UpperCAmelCase = tokenizer.tokenize(__a ) # , add_prefix_space=True) self.assertListEqual(__a , __a ) __UpperCAmelCase = tokens + [tokenizer.unk_token] __UpperCAmelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def snake_case__ ( self : Tuple ) -> str: __UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=__a ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def snake_case__ ( self : List[Any] ) -> Optional[Any]: __UpperCAmelCase = self.tokenizer_class.from_pretrained('''roberta-base''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__a ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__a , add_prefix_space=__a ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__a , add_prefix_space=__a ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def snake_case__ ( self : Union[str, Any] ) -> Tuple: __UpperCAmelCase = self.get_tokenizer() __UpperCAmelCase = '''Encode this sequence.''' __UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __UpperCAmelCase = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__a , __a ) __UpperCAmelCase = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__a , __a ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __UpperCAmelCase = tokenizer.encode(__a , add_special_tokens=__a ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__a , __a ) # Testing spaces after special tokens __UpperCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space __UpperCAmelCase = tokenizer.convert_tokens_to_ids(__a ) __UpperCAmelCase = '''Encode <mask> sequence''' __UpperCAmelCase = '''Encode <mask>sequence''' __UpperCAmelCase = tokenizer.encode(__a ) __UpperCAmelCase = encoded.index(__a ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__a , __a ) __UpperCAmelCase = tokenizer.encode(__a ) __UpperCAmelCase = encoded.index(__a ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__a , __a ) def snake_case__ ( self : List[Any] ) -> int: pass def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __UpperCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a ) __UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) __UpperCAmelCase = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / 
len(tokens_p['''attention_mask'''] ) , ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( __a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def snake_case__ ( self : Dict ) -> Dict: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __a ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __a ) self.assertEqual(post_processor_state['''trim_offsets'''] , __a ) def snake_case__ ( self : Dict ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __UpperCAmelCase = f"""{text_of_1_token} {text_of_1_token}""" __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, 
add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( __a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a ) __UpperCAmelCase = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
706
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) __UpperCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = dct.pop(UpperCamelCase__ ) __UpperCAmelCase = val def lowerCAmelCase ( UpperCamelCase__ : Dict ): """simple docstring""" if "handwritten" in checkpoint_url: __UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' __UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) return im @torch.no_grad() def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ ) __UpperCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __UpperCAmelCase = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = 4_0_9_6 __UpperCAmelCase = 2_4 __UpperCAmelCase = 1_6 __UpperCAmelCase = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = False __UpperCAmelCase = '''relu''' __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False # load HuggingFace model __UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ ) __UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ ) __UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() # load state_dict of original model, rename some keys __UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model'''] __UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __UpperCAmelCase = state_dict.pop(UpperCamelCase__ ) if key.startswith('''decoder''' ) and "output_projection" not in key: __UpperCAmelCase = val else: __UpperCAmelCase = val # load state dict model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image __UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) __UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' ) __UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values # verify logits __UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ) __UpperCAmelCase = outputs.logits __UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-2.64_37, -1.31_29, 
-2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
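# Usage sketch (added; flags taken from the argparse definitions above, the
# script filename is assumed):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten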
654
0
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __lowerCAmelCase : List[str] = HfApi() __lowerCAmelCase : Union[str, Any] = {} # fmt: off __lowerCAmelCase : int = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __lowerCAmelCase : List[Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __lowerCAmelCase : List[str] = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __lowerCAmelCase : Tuple = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __lowerCAmelCase : Tuple = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __lowerCAmelCase : Optional[Any] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __lowerCAmelCase : Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __lowerCAmelCase : Dict = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __lowerCAmelCase : int = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 
0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __lowerCAmelCase : Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __lowerCAmelCase : List[str] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __lowerCAmelCase : Union[str, Any] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __lowerCAmelCase : List[Any] = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __lowerCAmelCase : Dict = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __lowerCAmelCase : Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __lowerCAmelCase : List[Any] = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __lowerCAmelCase : Any = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): __lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: __lowerCAmelCase : Tuple = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __lowerCAmelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __lowerCAmelCase : List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __lowerCAmelCase : str = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
707
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]: return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy""" def snake_case__ ( self : Dict ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return image def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = '''bf16''' if fpaa else None __UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __a , subfolder='''unet''' , dtype=__a , revision=__a ) return model, params def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase , 
__UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__a , __a , atol=1e-2 )
654
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class A ( unittest.TestCase ): def __init__( self : Union[str, Any] , __a : List[str] , __a : List[str]=7 , __a : Dict=3 , __a : List[str]=3_0 , __a : Dict=4_0_0 , __a : List[str]=True , __a : int=None , __a : Optional[Any]=True , __a : Tuple=[0.5, 0.5, 0.5] , __a : List[Any]=[0.5, 0.5, 0.5] , __a : Any=True , __a : Optional[int]=1 / 2_5_5 , __a : Any=True , ) -> Dict: __UpperCAmelCase = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = num_channels __UpperCAmelCase = min_resolution __UpperCAmelCase = max_resolution __UpperCAmelCase = do_resize __UpperCAmelCase = size __UpperCAmelCase = do_normalize __UpperCAmelCase = image_mean __UpperCAmelCase = image_std __UpperCAmelCase = do_rescale __UpperCAmelCase = rescale_factor __UpperCAmelCase = do_pad def snake_case__ ( self : str ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self : List[str] , __a : Tuple , __a : str=False ) -> List[str]: if not batched: __UpperCAmelCase = image_inputs[0] if isinstance(_snake_case , Image.Image ): __UpperCAmelCase , __UpperCAmelCase = image.size else: __UpperCAmelCase , __UpperCAmelCase = image.shape[1], image.shape[2] if w < h: __UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w ) __UpperCAmelCase = self.size['''shortest_edge'''] elif w > h: __UpperCAmelCase = self.size['''shortest_edge'''] __UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h ) else: __UpperCAmelCase = self.size['''shortest_edge'''] __UpperCAmelCase = self.size['''shortest_edge'''] else: __UpperCAmelCase = [] for image in image_inputs: __UpperCAmelCase , __UpperCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase = max(_snake_case , key=lambda __a : item[0] )[0] __UpperCAmelCase = max(_snake_case , key=lambda __a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A ( UpperCAmelCase_ , unittest.TestCase ): a_ = YolosImageProcessor if is_vision_available() else None def snake_case__ ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase = YolosImageProcessingTester(self ) @property def snake_case__ ( self : int ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : str ) -> str: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , '''image_mean''' ) ) self.assertTrue(hasattr(_snake_case , '''image_std''' ) ) self.assertTrue(hasattr(_snake_case , '''do_normalize''' ) ) self.assertTrue(hasattr(_snake_case , '''do_resize''' ) ) self.assertTrue(hasattr(_snake_case , '''size''' ) ) def snake_case__ ( self : str ) -> Optional[Any]: __UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , _snake_case ) __UpperCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=_snake_case ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} ) self.assertEqual(image_processor.do_pad , _snake_case ) def snake_case__ ( self : List[str] ) -> int: pass def snake_case__ ( self : Union[str, Any] ) -> Any: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) __UpperCAmelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Tuple ) -> List[Any]: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values __UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input __UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_snake_case ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase = image_processing(_snake_case , return_tensors='''pt''' ).pixel_values __UpperCAmelCase , __UpperCAmelCase = 
self.image_processor_tester.get_expected_values(_snake_case , batched=_snake_case ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Any ) -> Tuple: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) __UpperCAmelCase = self.image_processing_class(do_resize=_snake_case , do_normalize=_snake_case , do_rescale=_snake_case ) # create random PyTorch tensors __UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors __UpperCAmelCase = image_processing_a.pad(_snake_case , return_tensors='''pt''' ) __UpperCAmelCase = image_processing_a(_snake_case , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) ) @slow def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: __UpperCAmelCase = json.loads(f.read() ) __UpperCAmelCase = {'''image_id''': 3_9_7_6_9, '''annotations''': target} # encode them __UpperCAmelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) __UpperCAmelCase = image_processing(images=_snake_case , annotations=_snake_case , return_tensors='''pt''' ) # verify pixel values __UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , _snake_case ) __UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _snake_case , atol=1e-4 ) ) # verify area __UpperCAmelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _snake_case ) ) # verify boxes __UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _snake_case ) __UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _snake_case , atol=1e-3 ) ) # verify image_id __UpperCAmelCase = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _snake_case ) ) # verify is_crowd __UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _snake_case ) ) # verify class_labels __UpperCAmelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _snake_case ) ) # verify orig_size __UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _snake_case ) ) # verify size __UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _snake_case ) ) @slow def snake_case__ ( self : List[str] ) -> Any: __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with 
open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: __UpperCAmelCase = json.loads(f.read() ) __UpperCAmelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target} __UpperCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them __UpperCAmelCase = YolosImageProcessor(format='''coco_panoptic''' ) __UpperCAmelCase = image_processing(images=_snake_case , annotations=_snake_case , masks_path=_snake_case , return_tensors='''pt''' ) # verify pixel values __UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape , _snake_case ) __UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _snake_case , atol=1e-4 ) ) # verify area __UpperCAmelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _snake_case ) ) # verify boxes __UpperCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _snake_case ) __UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _snake_case , atol=1e-3 ) ) # verify image_id __UpperCAmelCase = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _snake_case ) ) # verify is_crowd __UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _snake_case ) ) # verify class_labels __UpperCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _snake_case ) ) # verify masks __UpperCAmelCase = 8_2_2_8_7_3 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _snake_case ) # verify orig_size __UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _snake_case ) ) # verify size __UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _snake_case ) )
708
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned `check_min_version` in every actively maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded (init, setup and, for full releases, the examples)."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Point the model list in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package `__init__`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump of the minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
654
0
def apply_table(inp, table):
    """Permute the bit string `inp` according to the 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
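# A worked example of apply_table's 1-indexed convention (illustrative values):
#
#   >>> apply_table("1100", [2, 4, 3, 1])
#   '1001'
#
# Bit 2 ('1'), bit 4 ('0'), bit 3 ('0'), bit 1 ('1') are emitted in table order.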
709
def counting_sort(collection):
    """Sort a collection of integers with counting sort (stable, O(n + k))."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
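# A worked example (illustrative input):
#
#   >>> counting_sort([4, 1, 3, 1])
#   [1, 1, 3, 4]
#
# Counts over the value range 1..4 are [2, 0, 1, 1]; the prefix sum turns them
# into [2, 2, 3, 4], the 1-based final slot of the last copy of each value,
# which the reversed placement loop consumes to keep the sort stable.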
654
0
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class A ( unittest.TestCase ): a_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def snake_case__ ( self : List[str] , __a : Tuple , __a : List[str] , __a : List[Any] ) -> Optional[Any]: __UpperCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) __UpperCAmelCase = VideoClassificationPipeline(model=_a , image_processor=_a , top_k=2 ) __UpperCAmelCase = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def snake_case__ ( self : int , __a : Any , __a : Optional[Any] ) -> str: for example in examples: __UpperCAmelCase = video_classifier(_a ) self.assertEqual( _a , [ {'''score''': ANY(_a ), '''label''': ANY(_a )}, {'''score''': ANY(_a ), '''label''': ANY(_a )}, ] , ) @require_torch def snake_case__ ( self : str ) -> Dict: __UpperCAmelCase = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" __UpperCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 1_0} , crop_size={'''height''': 1_0, '''width''': 1_0} ) __UpperCAmelCase = pipeline( '''video-classification''' , model=_a , feature_extractor=_a , frame_sampling_rate=4 ) __UpperCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) __UpperCAmelCase = video_classifier(_a , top_k=2 ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}] , ) __UpperCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}], ] , ) @require_tf def snake_case__ ( self : Union[str, Any] ) -> Dict: pass
710
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
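# Note: the CSS class above matches Yahoo Finance's markup at the time of
# writing and can change without notice, in which case soup.find(...) returns
# None and the chained .find("span") raises. A guarded lookup works like this
# (self-contained, illustrative HTML):
#
#   >>> doc = BeautifulSoup("<div class='q'><span>123.45</span></div>", "html.parser")
#   >>> tag = doc.find("div", class_="q")
#   >>> tag.find("span").text if tag else "N/A"
#   '123.45'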
654
0
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): a_ = 1 @register_to_config def __init__( self : List[str] , __a : str=2_0_0_0 , __a : List[Any]=0.1 , __a : Any=2_0 , __a : str=1e-3 ) -> Optional[int]: __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None def snake_case__ ( self : int , __a : int , __a : Dict = None ) -> Optional[int]: __UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCamelCase__ , device=UpperCamelCase__ ) def snake_case__ ( self : Any , __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any]=None ) -> str: if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score __UpperCAmelCase = ( -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) __UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) __UpperCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): __UpperCAmelCase = std.unsqueeze(-1 ) __UpperCAmelCase = -score / std # compute __UpperCAmelCase = -1.0 / len(self.timesteps ) __UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) __UpperCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): __UpperCAmelCase = beta_t.unsqueeze(-1 ) __UpperCAmelCase = -0.5 * beta_t * x __UpperCAmelCase = torch.sqrt(UpperCamelCase__ ) __UpperCAmelCase = drift - diffusion**2 * score __UpperCAmelCase = x + drift * dt # add noise __UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCamelCase__ , device=x.device , dtype=x.dtype ) __UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Dict ) -> str: return self.config.num_train_timesteps
711
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Compute per-process waiting times, always running the arrived process with the least remaining time."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed: a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest process in ready_process,
    # target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
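# A worked trace of test case 01: with every arrival at 0 and bursts
# [2, 5, 3, 7], the loop always picks the arrived process with the least
# remaining time, so the run order is P1, P3, P2, P4, giving waiting times
# [0, 5, 2, 10] (mean 4.25) and turnaround times [2, 10, 5, 17] (mean 8.5).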
654
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add the three fractions x, y, z and return the reduced (numerator, denominator)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum every unique reduced fraction produced by the four cases below."""
    unique_s: set[tuple[int, int]] = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
712
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]: requires_backends(cls , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : int , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict: requires_backends(cls , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] 
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = 
['''torch'''] def __init__( self : Any , *__a : str , **__a : Any ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str: requires_backends(cls 
, ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Any , **__a : int ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple: requires_backends(cls , ['''torch'''] ) 
@classmethod def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int: requires_backends(self , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]: requires_backends(cls , ['''torch'''] )
654
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase : List[str] = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : int = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
713
'''simple docstring'''

import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
654
0
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def lowerCAmelCase ( UpperCamelCase__ : Any ):
    """simple docstring"""
    return (data["data"], data["target"])


def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
    """simple docstring"""
    __UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=4_2 )
    xgb.fit(a__ , a__ )
    # Predict target for test data
    __UpperCAmelCase = xgb.predict(a__ )
    __UpperCAmelCase = predictions.reshape(len(a__ ) , 1 )
    return predictions


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase = fetch_california_housing()
    __UpperCAmelCase , __UpperCAmelCase = data_handling(a__ )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(
        a__ , a__ , test_size=0.25 , random_state=1 )
    __UpperCAmelCase = xgboost(a__ , a__ , a__ )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(a__ , a__ )}""" )
    print(f"""Mean Square Error : {mean_squared_error(a__ , a__ )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
714
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowerCAmelCase : Optional[Any] = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Union[str, Any] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
'''simple docstring'''

def lowerCAmelCase ( UpperCamelCase__ : int ):
    """simple docstring"""
    __UpperCAmelCase = [1]
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0, 0, 0
    __UpperCAmelCase = ugly_nums[ia] * 2
    __UpperCAmelCase = ugly_nums[ia] * 3
    __UpperCAmelCase = ugly_nums[ia] * 5
    for _ in range(1 , UpperCamelCase__ ):
        __UpperCAmelCase = min(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        ugly_nums.append(UpperCamelCase__ )
        if next_num == next_a:
            ia += 1
            __UpperCAmelCase = ugly_nums[ia] * 2
        if next_num == next_a:
            ia += 1
            __UpperCAmelCase = ugly_nums[ia] * 3
        if next_num == next_a:
            ia += 1
            __UpperCAmelCase = ugly_nums[ia] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F"""{ugly_numbers(200) = }""")
715
'''simple docstring'''

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
    """simple docstring"""
    __UpperCAmelCase = {}
    if train_file is not None:
        __UpperCAmelCase = [train_file]
    if eval_file is not None:
        __UpperCAmelCase = [eval_file]
    if test_file is not None:
        __UpperCAmelCase = [test_file]
    __UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
    __UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
    __UpperCAmelCase = features_name.pop(UpperCamelCase__ )
    __UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
    __UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
    __UpperCAmelCase = tokenizer.model_input_names
    __UpperCAmelCase = {}
    if len(UpperCamelCase__ ) == 1:
        for k in files.keys():
            __UpperCAmelCase = ds[k].map(
                lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
    elif len(UpperCamelCase__ ) == 2:
        for k in files.keys():
            __UpperCAmelCase = ds[k].map(
                lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            __UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            __UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            __UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)

    __UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        __UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )

    __UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        __UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )

    __UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        __UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )

    return train_ds, val_ds, test_ds, labelaid


__lowerCAmelCase : List[Any] = logging.getLogger(__name__)


@dataclass
class A :
    a_ = field(metadata={'''help''': '''Which column contains the label'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
    a_ = field(
        default=1_2_8 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    a_ = field(
        default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )


@dataclass
class A :
    a_ = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    a_ = field(
        default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    a_ = field(
        default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    a_ = field(
        default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )


def lowerCAmelCase ( ):
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        f"""16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __UpperCAmelCase = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    __UpperCAmelCase = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )

    with training_args.strategy.scope():
        __UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )

    def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
        __UpperCAmelCase = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    __UpperCAmelCase = TFTrainer(
        model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    __UpperCAmelCase = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        __UpperCAmelCase = trainer.evaluate()
        __UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(UpperCamelCase__ , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f"""  {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
        results.update(UpperCamelCase__ )

    return results


if __name__ == "__main__":
    main()
654
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


__lowerCAmelCase : int = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : int = [
        '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ResNetForImageClassification''',
        '''ResNetModel''',
        '''ResNetPreTrainedModel''',
        '''ResNetBackbone''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : List[str] = [
        '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFResNetForImageClassification''',
        '''TFResNetModel''',
        '''TFResNetPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : List[str] = [
        '''FlaxResNetForImageClassification''',
        '''FlaxResNetModel''',
        '''FlaxResNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
716
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class A :
    def __init__( self : List[Any] , __a : Any , ) -> Dict:
        __UpperCAmelCase = parent
        __UpperCAmelCase = 1_3
        __UpperCAmelCase = 7
        __UpperCAmelCase = True
        __UpperCAmelCase = True
        __UpperCAmelCase = False
        __UpperCAmelCase = True
        __UpperCAmelCase = 9_9
        __UpperCAmelCase = 3_2
        __UpperCAmelCase = 2
        __UpperCAmelCase = 4
        __UpperCAmelCase = 3_7
        __UpperCAmelCase = '''gelu'''
        __UpperCAmelCase = 0.1
        __UpperCAmelCase = 0.1
        __UpperCAmelCase = 5_1_2
        __UpperCAmelCase = 1_6
        __UpperCAmelCase = 2
        __UpperCAmelCase = 0.0_2
        __UpperCAmelCase = 3
        __UpperCAmelCase = 4
        __UpperCAmelCase = None

    def snake_case__ ( self : Optional[int] ) -> Dict:
        __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCAmelCase = None
        if self.use_input_mask:
            __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCAmelCase = None
        __UpperCAmelCase = None
        __UpperCAmelCase = None
        if self.use_labels:
            __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCAmelCase = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
        __UpperCAmelCase = TFDistilBertModel(config=__a )
        __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase = model(__a )
        __UpperCAmelCase = [input_ids, input_mask]
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
        __UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
        __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
        __UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
        __UpperCAmelCase = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
        __UpperCAmelCase = self.num_labels
        __UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
        __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
        __UpperCAmelCase = self.num_choices
        __UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
        __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        __UpperCAmelCase = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
        __UpperCAmelCase = self.num_labels
        __UpperCAmelCase = TFDistilBertForTokenClassification(__a )
        __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        __UpperCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self : str ) -> Any:
        __UpperCAmelCase = self.prepare_config_and_inputs()
        ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
        __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    a_ = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    a_ = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a_ = False
    a_ = False

    def snake_case__ ( self : Any ) -> Any:
        __UpperCAmelCase = TFDistilBertModelTester(self )
        __UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )

    def snake_case__ ( self : List[Any] ) -> Optional[int]:
        self.config_tester.run_common_tests()

    def snake_case__ ( self : Any ) -> str:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*__a )

    def snake_case__ ( self : Tuple ) -> Dict:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )

    def snake_case__ ( self : Union[str, Any] ) -> Any:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*__a )

    def snake_case__ ( self : Optional[Any] ) -> Dict:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )

    def snake_case__ ( self : Any ) -> int:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )

    def snake_case__ ( self : List[str] ) -> List[Any]:
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*__a )

    @slow
    def snake_case__ ( self : Dict ) -> Tuple:
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            __UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
            self.assertIsNotNone(__a )


@require_tf
class A ( unittest.TestCase ):
    @slow
    def snake_case__ ( self : int ) -> Dict:
        __UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        __UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCAmelCase = model(__a )[0]
        __UpperCAmelCase = [1, 6, 7_6_8]
        self.assertEqual(output.shape , __a )
        __UpperCAmelCase = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
654
0
'''simple docstring'''

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Any = logging.get_logger(__name__)

__lowerCAmelCase : Union[str, Any] = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class A ( UpperCamelCase_ ):
    '''simple docstring'''

    a_ = '''blip_text_model'''

    def __init__( self : str , __a : Optional[int]=3_0_5_2_4 , __a : List[str]=7_6_8 , __a : List[str]=7_6_8 , __a : Union[str, Any]=3_0_7_2 , __a : Dict=7_6_8 , __a : str=1_2 , __a : str=8 , __a : List[str]=5_1_2 , __a : int="gelu" , __a : List[Any]=1e-12 , __a : Optional[int]=0.0 , __a : Tuple=0.0 , __a : Any=0.0_2 , __a : Tuple=3_0_5_2_2 , __a : Optional[int]=2 , __a : Any=0 , __a : Dict=1_0_2 , __a : Tuple=True , __a : Tuple=True , **__a : int , ) -> Tuple:
        super().__init__(
            pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
        __UpperCAmelCase = vocab_size
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = encoder_hidden_size
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = projection_dim
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = max_position_embeddings
        __UpperCAmelCase = layer_norm_eps
        __UpperCAmelCase = hidden_act
        __UpperCAmelCase = initializer_range
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = is_decoder
        __UpperCAmelCase = use_cache

    @classmethod
    def snake_case__ ( cls : Any , __a : List[str] , **__a : Optional[Any] ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(__a )
        __UpperCAmelCase = cls.get_config_dict(__a , **__a )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('''model_type''' ) == "blip":
            __UpperCAmelCase = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(__a , **__a )


class A ( UpperCamelCase_ ):
    '''simple docstring'''

    a_ = '''blip_vision_model'''

    def __init__( self : Optional[Any] , __a : Tuple=7_6_8 , __a : Dict=3_0_7_2 , __a : int=5_1_2 , __a : Any=1_2 , __a : Union[str, Any]=1_2 , __a : List[str]=3_8_4 , __a : Dict=1_6 , __a : Dict="gelu" , __a : Optional[int]=1e-5 , __a : str=0.0 , __a : Optional[Any]=1e-10 , **__a : Any , ) -> int:
        super().__init__(**__a )
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = projection_dim
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = patch_size
        __UpperCAmelCase = image_size
        __UpperCAmelCase = initializer_range
        __UpperCAmelCase = attention_dropout
        __UpperCAmelCase = layer_norm_eps
        __UpperCAmelCase = hidden_act

    @classmethod
    def snake_case__ ( cls : Optional[int] , __a : int , **__a : int ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(__a )
        __UpperCAmelCase = cls.get_config_dict(__a , **__a )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('''model_type''' ) == "blip":
            __UpperCAmelCase = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(__a , **__a )


class A ( UpperCamelCase_ ):
    '''simple docstring'''

    a_ = '''blip'''
    a_ = True

    def __init__( self : Optional[int] , __a : Tuple=None , __a : Optional[Any]=None , __a : Union[str, Any]=5_1_2 , __a : str=2.6_5_9_2 , __a : Tuple=2_5_6 , **__a : Union[str, Any] , ) -> List[str]:
        super().__init__(**__a )
        if text_config is None:
            __UpperCAmelCase = {}
            logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
        if vision_config is None:
            __UpperCAmelCase = {}
            logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
        __UpperCAmelCase = BlipTextConfig(**__a )
        __UpperCAmelCase = BlipVisionConfig(**__a )
        __UpperCAmelCase = self.vision_config.hidden_size
        __UpperCAmelCase = projection_dim
        __UpperCAmelCase = logit_scale_init_value
        __UpperCAmelCase = 1.0
        __UpperCAmelCase = 0.0_2
        __UpperCAmelCase = image_text_hidden_size

    @classmethod
    def snake_case__ ( cls : Union[str, Any] , __a : Dict , __a : str , **__a : Optional[int] ) -> Any:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )

    def snake_case__ ( self : int ) -> Any:
        __UpperCAmelCase = copy.deepcopy(self.__dict__ )
        __UpperCAmelCase = self.text_config.to_dict()
        __UpperCAmelCase = self.vision_config.to_dict()
        __UpperCAmelCase = self.__class__.model_type
        return output
717
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


__lowerCAmelCase : List[Any] = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
from __future__ import annotations


def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):  # noqa: E741
    """simple docstring"""
    while r - l > 1:
        __UpperCAmelCase = (l + r) // 2
        if v[m] >= key:
            __UpperCAmelCase = m
        else:
            __UpperCAmelCase = m  # noqa: E741
    return r


def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
    """simple docstring"""
    if len(_lowercase ) == 0:
        return 0
    __UpperCAmelCase = [0] * len(_lowercase )
    __UpperCAmelCase = 1
    __UpperCAmelCase = v[0]
    for i in range(1 , len(_lowercase ) ):
        if v[i] < tail[0]:
            __UpperCAmelCase = v[i]
        elif v[i] > tail[length - 1]:
            __UpperCAmelCase = v[i]
            length += 1
        else:
            __UpperCAmelCase = v[i]
    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
718
'''simple docstring'''

from ...configuration_utils import PretrainedConfig


class A ( UpperCAmelCase ):
    a_ = '''bert-generation'''

    def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
        __UpperCAmelCase = vocab_size
        __UpperCAmelCase = hidden_size
        __UpperCAmelCase = num_hidden_layers
        __UpperCAmelCase = num_attention_heads
        __UpperCAmelCase = hidden_act
        __UpperCAmelCase = intermediate_size
        __UpperCAmelCase = hidden_dropout_prob
        __UpperCAmelCase = attention_probs_dropout_prob
        __UpperCAmelCase = max_position_embeddings
        __UpperCAmelCase = initializer_range
        __UpperCAmelCase = layer_norm_eps
        __UpperCAmelCase = position_embedding_type
        __UpperCAmelCase = use_cache
654
0
'''simple docstring'''

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    __UpperCAmelCase = parser.add_subparsers(help='''transformers-cli command helpers''' )

    # Register commands
    ConvertCommand.register_subcommand(_lowerCamelCase )
    DownloadCommand.register_subcommand(_lowerCamelCase )
    EnvironmentCommand.register_subcommand(_lowerCamelCase )
    RunCommand.register_subcommand(_lowerCamelCase )
    ServeCommand.register_subcommand(_lowerCamelCase )
    UserCommands.register_subcommand(_lowerCamelCase )
    AddNewModelCommand.register_subcommand(_lowerCamelCase )
    AddNewModelLikeCommand.register_subcommand(_lowerCamelCase )
    LfsCommands.register_subcommand(_lowerCamelCase )
    PTtoTFCommand.register_subcommand(_lowerCamelCase )

    # Let's go
    __UpperCAmelCase = parser.parse_args()
    if not hasattr(_lowerCamelCase , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    __UpperCAmelCase = args.func(_lowerCamelCase )
    service.run()


if __name__ == "__main__":
    main()
719
'''simple docstring'''

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458

# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")


def lowerCAmelCase ( UpperCamelCase__ : float ):
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c


def lowerCAmelCase ( UpperCamelCase__ : float ):
    """simple docstring"""
    return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )


def lowerCAmelCase ( UpperCamelCase__ : float ):
    """simple docstring"""
    return np.array(
        [
            [gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
            [-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )


def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        __UpperCAmelCase = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(UpperCamelCase__ ) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    __lowerCAmelCase : Dict = transform(29_979_245)
    print("Example of four vector: ")
    print(F"""ct' = {four_vector[0]}""")
    print(F"""x' = {four_vector[1]}""")
    print(F"""y' = {four_vector[2]}""")
    print(F"""z' = {four_vector[3]}""")

    # Substitute symbols with numerical values
    __lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
    __lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"""\n{numerical_vector}""")
654
0
'''simple docstring'''

def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
    """simple docstring"""
    __UpperCAmelCase = len(lowercase_ )
    __UpperCAmelCase = len(lowercase_ )
    __UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    __UpperCAmelCase = True
    for i in range(lowercase_ ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    __UpperCAmelCase = True
                if a[i].islower():
                    __UpperCAmelCase = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
'''simple docstring'''

import heapq
import sys

import numpy as np


__lowerCAmelCase : Any = tuple[int, int]


class A :
    def __init__( self : Optional[int] ) -> int:
        __UpperCAmelCase = []
        __UpperCAmelCase = set()

    def snake_case__ ( self : Optional[Any] ) -> List[Any]:
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )

    def snake_case__ ( self : Dict ) -> Optional[int]:
        return len(self.elements ) == 0

    def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(__a )
        else:
            # update
            # print("update", item)
            __UpperCAmelCase = []
            ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def snake_case__ ( self : int , __a : Any ) -> int:
        if item in self.set:
            self.set.remove(__a )
            __UpperCAmelCase = []
            ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def snake_case__ ( self : List[str] ) -> Dict:
        return self.elements[0][1]

    def snake_case__ ( self : Any ) -> List[str]:
        ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
        self.set.remove(__a )
        return (priority, item)


def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
    """simple docstring"""
    # euclidean distance
    __UpperCAmelCase = np.array(UpperCamelCase__ )
    __UpperCAmelCase = np.array(UpperCamelCase__ )
    return np.linalg.norm(a - b )


def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
    """simple docstring"""
    # integer division by time variable
    return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t


def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
    """simple docstring"""
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )


def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
    """simple docstring"""
    __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
    return ans


def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
    """simple docstring"""
    __UpperCAmelCase = np.chararray((n, n) )
    for i in range(UpperCamelCase__ ):
        for j in range(UpperCamelCase__ ):
            __UpperCAmelCase = '''*'''

    for i in range(UpperCamelCase__ ):
        for j in range(UpperCamelCase__ ):
            if (j, (n - 1) - i) in blocks:
                __UpperCAmelCase = '''#'''

    __UpperCAmelCase = '''-'''
    __UpperCAmelCase = back_pointer[goal]
    while x != start:
        ((__UpperCAmelCase) , (__UpperCAmelCase)) = x
        # print(x)
        __UpperCAmelCase = '''-'''
        __UpperCAmelCase = back_pointer[x]
    __UpperCAmelCase = '''-'''

    for i in range(UpperCamelCase__ ):
        for j in range(UpperCamelCase__ ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    __UpperCAmelCase = back_pointer[goal]
    while x != start:
        print(UpperCamelCase__ , end=''' ''' )
        __UpperCAmelCase = back_pointer[x]
    print(UpperCamelCase__ )
    sys.exit()


def lowerCAmelCase ( UpperCamelCase__ : TPos ):
    """simple docstring"""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
    """simple docstring"""
    for itera in range(UpperCamelCase__ ):
        open_list[itera].remove_element(UpperCamelCase__ )
    # print("s", s)
    # print("j", j)
    ((__UpperCAmelCase) , (__UpperCAmelCase)) = s
    __UpperCAmelCase = (x - 1, y)
    __UpperCAmelCase = (x + 1, y)
    __UpperCAmelCase = (x, y + 1)
    __UpperCAmelCase = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(UpperCamelCase__ ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(UpperCamelCase__ )
                __UpperCAmelCase = -1
                __UpperCAmelCase = float('''inf''' )
            if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
                __UpperCAmelCase = g_function[s] + 1
                __UpperCAmelCase = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
                if neighbours not in close_list_inad:
                    for var in range(1 , UpperCamelCase__ ):
                        if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
                            UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
                            open_list[j].put(
                                UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase = []
    for x in range(1 , 5 ):
        for y in range(1 , 6 ):
            some_list.append((x, y) )
    for x in range(1_5 , 2_0 ):
        some_list.append((x, 1_7) )
    for x in range(1_0 , 1_9 ):
        for y in range(1 , 1_5 ):
            some_list.append((x, y) )
    # L block
    for x in range(1 , 4 ):
        for y in range(1_2 , 1_9 ):
            some_list.append((x, y) )
    for x in range(3 , 1_3 ):
        for y in range(1_6 , 1_9 ):
            some_list.append((x, y) )
    return some_list


__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}

__lowerCAmelCase : List[Any] = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]

__lowerCAmelCase : Dict = make_common_ground()

__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3  # one consistent and two other inconsistent

# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)

__lowerCAmelCase : Optional[int] = 1


def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
    """simple docstring"""
    __UpperCAmelCase = {start: 0, goal: float('''inf''' )}
    __UpperCAmelCase = {start: -1, goal: -1}
    __UpperCAmelCase = []
    __UpperCAmelCase = set()
    for i in range(UpperCamelCase__ ):
        open_list.append(PriorityQueue() )
        open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , UpperCamelCase__ ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                else:
                    __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
                    visited.add(UpperCamelCase__ )
                    expand_state(
                        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
                    close_list_inad.append(UpperCamelCase__ )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                    else:
                        __UpperCAmelCase = open_list[0].top_show()
                        visited.add(UpperCamelCase__ )
                        expand_state(
                            UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
                        close_list_anchor.append(UpperCamelCase__ )
    print('''No path found to goal''' )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(UpperCamelCase__ ):
            if (j, i) in blocks:
                print('''#''' , end=''' ''' )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('''*''' , end=''' ''' )
                else:
                    print('''-''' , end=''' ''' )
            else:
                print('''*''' , end=''' ''' )
            if (j, i) == (n - 1, n - 1):
                print('''<-- End position''' , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
654
0
'''simple docstring'''

import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def lowerCAmelCase ( UpperCamelCase__ : Tuple ):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase ( ):
    """simple docstring"""
    with parallel_backend('''spark''' ):
        assert ParallelBackendConfig.backend_name == "spark"

    __UpperCAmelCase = [1, 2, 3]
    with pytest.raises(lowerCAmelCase__ ):
        with parallel_backend('''unsupported backend''' ):
            map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=2 )

    with pytest.raises(lowerCAmelCase__ ):
        with parallel_backend('''unsupported backend''' ):
            map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=-1 )


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] ):
    """simple docstring"""
    __UpperCAmelCase = [1, 2]
    __UpperCAmelCase = {'''a''': 1, '''b''': 2}
    __UpperCAmelCase = {'''a''': [1, 2], '''b''': [3, 4]}
    __UpperCAmelCase = {'''a''': {'''1''': 1}, '''b''': 2}
    __UpperCAmelCase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    __UpperCAmelCase = [2, 3]
    __UpperCAmelCase = {'''a''': 2, '''b''': 3}
    __UpperCAmelCase = {'''a''': [2, 3], '''b''': [4, 5]}
    __UpperCAmelCase = {'''a''': {'''1''': 2}, '''b''': 3}
    __UpperCAmelCase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}

    with parallel_backend('''spark''' ):
        assert map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) == expected_map_nested_sa
        assert map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) == expected_map_nested_sa
        assert map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) == expected_map_nested_sa
        assert map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) == expected_map_nested_sa
        assert map_nested(lowerCAmelCase__ , lowerCAmelCase__ , num_proc=lowerCAmelCase__ ) == expected_map_nested_sa
721
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __lowerCAmelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __lowerCAmelCase : Optional[int] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( 
"zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): __UpperCAmelCase = None if _re_tf_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = tf_models __UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = flax_models __UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = pt_models __UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCAmelCase = True break # Try again after removing the last word in the name __UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] ) __UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCAmelCase = list(UpperCamelCase__ ) all_models.sort() __UpperCAmelCase = {'''model_type''': all_models} __UpperCAmelCase = [pt_models[t] for t in all_models] __UpperCAmelCase = [tf_models[t] for t in all_models] __UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCAmelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCAmelCase = '''AutoTokenizer''' __UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] __UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names __UpperCAmelCase = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" __UpperCAmelCase = get_frameworks_table() __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) __UpperCAmelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ ) __UpperCAmelCase = Dataset.from_json(UpperCamelCase__ ) __UpperCAmelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(UpperCamelCase__ ) ) } __UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCAmelCase = sorted(table.keys() ) __UpperCAmelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __UpperCAmelCase = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __UpperCAmelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS __UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: __UpperCAmelCase = pipeline_tasks[key]['''pt'''] if isinstance(UpperCamelCase__ , (list, tuple) ): __UpperCAmelCase = model[0] __UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ''', '''.join(UpperCamelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __lowerCAmelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
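For reference, the camel-case splitting regex used above can be exercised on its own; the helper below restates it under a descriptive name purely for illustration:

import re

_re_camel = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")


def camel_case_split(identifier: str) -> list:
    """Split CamelCase identifiers, keeping acronym runs such as 'TF' together."""
    return [m.group(0) for m in _re_camel.finditer(identifier)]


print(camel_case_split("TFBertForMaskedLM"))  # ['TF', 'Bert', 'For', 'Masked', 'LM']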
654
0
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s), i.e. the most frequently occurring value(s), of input_list.

    >>> mode([2, 2, 3])
    [2]
    >>> mode([1, 1, 2, 2])
    [1, 2]
    >>> mode([])
    []
    """
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
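For larger inputs the quadratic list.count scan can be replaced by one collections.Counter pass; a sketch of the equivalent, cross-checked against the mode function above:

from collections import Counter


def mode_fast(input_list: list) -> list:
    """One-pass variant of mode() using a frequency table instead of list.count."""
    if not input_list:
        return []
    counts = Counter(input_list)
    max_count = max(counts.values())
    return sorted(value for value, count in counts.items() if count == max_count)


assert mode_fast([2, 2, 3, 3, 3]) == mode([2, 2, 3, 3, 3]) == [3]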
700
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase : Optional[int] = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class A ( unittest.TestCase ): def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple: __UpperCAmelCase = None __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) __UpperCAmelCase = os.path.abspath('''examples''' ) for item in os.listdir(__a ): if item not in EXCLUDE_EXAMPLES: __UpperCAmelCase = os.path.join(__a , __a ) if os.path.isfile(__a ) and ".py" in item_path: with self.subTest( tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ): __UpperCAmelCase = compare_against_test( os.path.join(__a , __a ) , __a , __a , __a ) __UpperCAmelCase = '''\n'''.join(__a ) if special_strings is not None: for string in special_strings: __UpperCAmelCase = diff.replace(__a , '''''' ) self.assertEqual(__a , '''''' ) def snake_case__ ( self : Optional[Any] ) -> str: self.one_complete_example('''complete_nlp_example.py''' , __a ) self.one_complete_example('''complete_nlp_example.py''' , __a ) def snake_case__ ( self : List[str] ) -> Tuple: __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) __UpperCAmelCase = [ ''' ''' * 1_6 + '''{\n\n''', ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 2_0 + '''"epoch": epoch,\n\n''', ''' ''' * 1_6 + '''},\n\n''', ''' ''' * 1_6 + '''step=epoch,\n''', ''' ''' * 1_2, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class A ( UpperCAmelCase ): a_ = False @classmethod def snake_case__ ( cls : Tuple ) -> str: super().setUpClass() __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) __UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Dict ) -> int: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) if torch.cuda.is_available(): __UpperCAmelCase = torch.cuda.device_count() else: __UpperCAmelCase = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) else: self.assertIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) @slow def snake_case__ ( self : Any ) -> Optional[Any]: __UpperCAmelCase = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) __UpperCAmelCase = re.findall('''({.+})''' , __a ) __UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1] __UpperCAmelCase = ast.literal_eval(__a ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case__ ( self : Dict ) -> int: __UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: __UpperCAmelCase = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case__ ( self : Tuple ) -> Optional[Any]: __UpperCAmelCase = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
654
0
# Pinned dependency table: package name -> pip requirement string.
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
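A minimal sketch of how the pinned-dependency table above can be split back into package names and version specifiers; the regex is illustrative, not the library's own parser:

import re

# A requirement name is the leading run of characters that cannot start a
# version specifier; everything after it (if anything) is the constraint.
spec_re = re.compile(r"^([^!=<>~ ]+)(.*)$")

for key, requirement in deps.items():
    name, spec = spec_re.match(requirement).groups()
    assert name == key, f"table key {key!r} does not match requirement name {name!r}"
    print(f"{name:20s} {spec or '(unpinned)'}")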
701
"""Horizontal/vertical flip augmentation for YOLO-format image datasets."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths and flip direction; fill these in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and their YOLO-format box lists from a label folder."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip every image and mirror the matching box centre coordinate."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y centre
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random lowercase alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
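The YOLO-format annotations above store normalised box centres, so flipping an image only mirrors one coordinate of each box; a tiny check of that arithmetic with made-up values:

# (class_id, x_center, y_center, width, height), all normalised to [0, 1]
bbox = (0, 0.25, 0.125, 0.1, 0.2)

horizontal = (bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4])  # mirror x centre
vertical = (bbox[0], bbox[1], 1 - bbox[2], bbox[3], bbox[4])    # mirror y centre

assert horizontal == (0, 0.75, 0.125, 0.1, 0.2)
assert vertical == (0, 0.25, 0.875, 0.1, 0.2)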
654
0
"""Lazy import structure for the VisionTextDualEncoder model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
702
"""Copy a truncated version of every file in a directory."""
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
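A smoke test for the minify helper above, using temporary directories so nothing on disk is touched (file names here are made up):

import tempfile
from pathlib import Path

# Write a 5-line file, keep only the first 2 lines in the destination copy.
with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dst:
    (Path(src) / "sample.txt").write_text("\n".join(f"line {i}" for i in range(5)))
    minify(src, f"{dst}/mini", n=2)
    assert (Path(dst) / "mini" / "sample.txt").read_text() == "line 0\nline 1"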
654
0
"""Calculate apparent power in a single-phase AC circuit."""
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power S from the magnitudes and phase angles
    (in degrees) of the voltage and current phasors.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
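A worked example for apparent_power above: 100 V at +30 degrees times 5 A at -30 degrees gives 500 VA at 0 degrees (note the snippet multiplies the phasors directly; conventions that conjugate the current would flip the sign of the current angle):

import cmath
import math

s = apparent_power(100, 5, 30, -30)  # assumes apparent_power above is in scope
assert math.isclose(abs(s), 500.0)
assert math.isclose(cmath.phase(s), 0.0, abs_tol=1e-9)
print(s)  # approximately (500+0j)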
703
def catalan(number: int) -> int:
    """Return the number-th Catalan number (1-indexed).

    >>> catalan(1)
    1
    >>> catalan(4)
    5
    >>> catalan("five")
    Traceback (most recent call last):
        ...
    TypeError: Input value of [number=five] must be an integer
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
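The loop applies the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1), so catalan(i) is the (i - 1)-th Catalan number; a quick check against the closed form C(n) = comb(2n, n) / (n + 1), assuming the catalan function above is in scope:

from math import comb

print([catalan(i) for i in range(1, 7)])  # [1, 1, 2, 5, 14, 42]
assert all(catalan(n + 1) == comb(2 * n, n) // (n + 1) for n in range(10))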
654
0
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def lowerCAmelCase ( UpperCamelCase__ : List[Any] ): """simple docstring""" return x + 2 class A ( unittest.TestCase ): def snake_case__ ( self : List[Any] ) -> int: __UpperCAmelCase = 'x = 3' __UpperCAmelCase = {} __UpperCAmelCase = evaluate(__a , {} , state=__a ) assert result == 3 self.assertDictEqual(__a , {'''x''': 3} ) __UpperCAmelCase = 'x = y' __UpperCAmelCase = {'y': 5} __UpperCAmelCase = evaluate(__a , {} , state=__a ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__a , {'''x''': 5, '''y''': 5} ) def snake_case__ ( self : Optional[int] ) -> Union[str, Any]: __UpperCAmelCase = 'y = add_two(x)' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {'''add_two''': add_two} , state=__a ) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: __UpperCAmelCase = evaluate(__a , {} , state=__a ) assert result is None assert "tried to execute add_two" in out.out def snake_case__ ( self : Any ) -> Optional[int]: __UpperCAmelCase = 'x = 3' __UpperCAmelCase = {} __UpperCAmelCase = evaluate(__a , {} , state=__a ) assert result == 3 self.assertDictEqual(__a , {'''x''': 3} ) def snake_case__ ( self : List[str] ) -> Optional[Any]: __UpperCAmelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {'''add_two''': add_two} , state=__a ) self.assertDictEqual(__a , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(__a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__ ( self : Optional[Any] ) -> int: __UpperCAmelCase = 'x = 3\ny = 5' __UpperCAmelCase = {} __UpperCAmelCase = evaluate(__a , {} , state=__a ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''y''': 5} ) def snake_case__ ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase = 'text = f\'This is x: {x}.\'' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {} , state=__a ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__a , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__ ( self : Dict ) -> Any: __UpperCAmelCase = 'if x <= 3:\n y = 2\nelse:\n y = 5' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {} , state=__a ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__a , {'''x''': 3, '''y''': 2} ) __UpperCAmelCase = {'x': 8} __UpperCAmelCase = evaluate(__a , {} , state=__a ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__a , {'''x''': 8, '''y''': 5} ) def snake_case__ ( self : int ) -> Any: __UpperCAmelCase = 'test_list = [x, add_two(x)]' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {'''add_two''': add_two} , state=__a ) self.assertListEqual(__a , [3, 5] ) self.assertDictEqual(__a , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = 'y = x' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {} , state=__a ) assert result == 3 self.assertDictEqual(__a , {'''x''': 3, '''y''': 3} ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: __UpperCAmelCase = 'test_list = [x, add_two(x)]\ntest_list[1]' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {'''add_two''': add_two} , state=__a ) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''test_list''': [3, 5]} ) __UpperCAmelCase = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']' __UpperCAmelCase = {'x': 3} __UpperCAmelCase = evaluate(__a , {'''add_two''': add_two} , state=__a ) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__ ( self : Optional[int] ) -> Tuple: __UpperCAmelCase = 'x = 0\nfor i in range(3):\n x = i' __UpperCAmelCase = {} __UpperCAmelCase = evaluate(__a , {'''range''': range} , state=__a ) assert result == 2 self.assertDictEqual(__a , {'''x''': 2, '''i''': 2} )
704
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase ( ): """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class A ( nn.Module ): def __init__( self : Optional[Any] ) -> int: super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A ( unittest.TestCase ): def snake_case__ ( self : Optional[int] ) -> Any: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : Union[str, Any] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Optional[int] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def snake_case__ ( self : Any ) -> int: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__a : Optional[int] ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : Any ) -> List[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Dict ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : List[Any] ) -> List[str]: @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(1_2_8 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def snake_case__ ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Tuple ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def snake_case__ ( self : Any ) -> List[Any]: __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) __UpperCAmelCase = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
654
0
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class A ( __lowercase , unittest.TestCase ): a_ = VideoToVideoSDPipeline a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''} a_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''} a_ = PipelineTesterMixin.required_optional_params - {'''latents'''} a_ = False # No `output_type`. a_ = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]: torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=3_2 , attention_head_dim=4 , ) __UpperCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) __UpperCAmelCase = CLIPTextModel(_A ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def snake_case__ ( self : List[str] , __a : Any , __a : str=0 ) -> Optional[Any]: # 3 frames __UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_A ) else: __UpperCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) __UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''video''': video, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def snake_case__ ( self : List[Any] ) -> str: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = VideoToVideoSDPipeline(**_A ) __UpperCAmelCase = sd_pipe.to(_A ) 
sd_pipe.set_progress_bar_config(disable=_A ) __UpperCAmelCase = self.get_dummy_inputs(_A ) __UpperCAmelCase = '''np''' __UpperCAmelCase = sd_pipe(**_A ).frames __UpperCAmelCase = frames[0][-3:, -3:, -1] assert frames[0].shape == (3_2, 3_2, 3) __UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=5e-3 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def snake_case__ ( self : List[Any] ) -> Tuple: pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def snake_case__ ( self : int ) -> str: pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def snake_case__ ( self : List[str] ) -> str: pass def snake_case__ ( self : List[str] ) -> List[Any]: return super().test_progress_bar() @slow @skip_mps class A ( unittest.TestCase ): def snake_case__ ( self : List[str] ) -> str: __UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=_A ) __UpperCAmelCase = video.to('''cuda''' ) __UpperCAmelCase = '''Spiderman is surfing''' __UpperCAmelCase = pipe(_A , video=_A , generator=_A , num_inference_steps=3 , output_type='''pt''' ).frames __UpperCAmelCase = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
705
"""Newton's forward interpolation on equally spaced sample points."""
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the factor used by the formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
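A non-interactive sketch of the same computation, reusing ucal above: f(x) = x**2 sampled at x = 0..3 and interpolated at x = 1.5 must give exactly 2.25, since the degree-3 forward-difference polynomial reproduces a quadratic:

import math

x = [0, 1, 2, 3]
n = len(x)
y = [[float(j) for j in range(n)] for _ in range(n)]  # placeholder table
for i in range(n):
    y[i][0] = float(x[i] ** 2)  # f(x) = x**2

# forward-difference table: column i holds the i-th forward differences
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 1.5
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

print(f"the value at {value} is {summ}")  # 2.25
assert math.isclose(summ, 2.25)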
654
0
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Sort list_data in place with recursive bubble sort and return it.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # Swap adjacent elements that are out of order.
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # If no swaps happened the list is sorted; otherwise recurse on a shorter prefix.
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
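A randomised cross-check of bubble_sort above against the built-in sorted (a property test added for illustration):

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert bubble_sort(list(data)) == sorted(data)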
706
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) __UpperCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = dct.pop(UpperCamelCase__ ) __UpperCAmelCase = val def lowerCAmelCase ( UpperCamelCase__ : Dict ): """simple docstring""" if "handwritten" in checkpoint_url: __UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' __UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) return im @torch.no_grad() def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ ) __UpperCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __UpperCAmelCase = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = 4_0_9_6 __UpperCAmelCase = 2_4 __UpperCAmelCase = 1_6 __UpperCAmelCase = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = False __UpperCAmelCase = '''relu''' __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False # load HuggingFace model __UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ ) __UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ ) __UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() # load state_dict of original model, rename some keys __UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model'''] __UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __UpperCAmelCase = state_dict.pop(UpperCamelCase__ ) if key.startswith('''decoder''' ) and "output_projection" not in key: __UpperCAmelCase = val else: __UpperCAmelCase = val # load state dict model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image __UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) __UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' ) __UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values # verify logits __UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ) __UpperCAmelCase = outputs.logits __UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-2.64_37, -1.31_29, 
-2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
654
0
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __lowerCAmelCase : Union[str, Any] = "CompVis/stable-diffusion-v1-1" __lowerCAmelCase : Optional[Any] = "CompVis/stable-diffusion-v1-2" __lowerCAmelCase : Tuple = "CompVis/stable-diffusion-v1-3" __lowerCAmelCase : List[Any] = "CompVis/stable-diffusion-v1-4" class A ( __lowerCamelCase ): def __init__( self : Optional[int] , __a : List[Any] , __a : int , __a : Any , __a : str , __a : int , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[int] = True , ) -> Dict: super()._init_() __UpperCAmelCase = StableDiffusionPipeline.from_pretrained(a_ ) __UpperCAmelCase = StableDiffusionPipeline.from_pretrained(a_ ) __UpperCAmelCase = StableDiffusionPipeline.from_pretrained(a_ ) __UpperCAmelCase = StableDiffusionPipeline( vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=a_ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def snake_case__ ( self : Union[str, Any] ) -> Tuple: return {k: getattr(self , a_ ) for k in self.config.keys() if not k.startswith('''_''' )} def snake_case__ ( self : Optional[int] , __a : List[Any] = "auto" ) -> Dict: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a_ ) def snake_case__ ( self : int ) -> List[Any]: self.enable_attention_slicing(a_ ) @torch.no_grad() def snake_case__ ( self : Tuple , __a : Optional[int] , __a : Optional[Any] = 5_1_2 , __a : Dict = 5_1_2 , __a : int = 5_0 , __a : Tuple = 7.5 , __a : Optional[int] = None , __a : Dict = 1 , __a : Dict = 0.0 , __a : Optional[Any] = None , __a : Any = None , __a : Union[str, Any] = "pil" , __a : Union[str, Any] = True , __a : Tuple = None , __a : str = 1 , **__a : List[Any] , ) -> List[Any]: return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def snake_case__ ( self : Union[str, Any] , __a : Optional[Any] , __a : Any = 5_1_2 , __a : List[Any] = 5_1_2 , __a : int = 5_0 , __a : Optional[Any] = 7.5 , __a : str = None , __a : Optional[int] = 1 , __a : Optional[Any] = 0.0 , __a : int = None , __a : List[str] = None , __a : str = "pil" , __a : List[str] = True , __a : Optional[Any] = None , __a : List[str] = 1 , **__a : int , ) -> Tuple: return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def snake_case__ ( self : Tuple , __a : Optional[int] , __a : int = 5_1_2 , __a : Optional[Any] = 5_1_2 , __a : Any = 5_0 , __a : Tuple = 7.5 , __a : Tuple = None , __a : List[Any] = 
1 , __a : Tuple = 0.0 , __a : Tuple = None , __a : int = None , __a : Dict = "pil" , __a : Union[str, Any] = True , __a : Union[str, Any] = None , __a : Tuple = 1 , **__a : Optional[int] , ) -> Dict: return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def snake_case__ ( self : Union[str, Any] , __a : List[Any] , __a : Optional[int] = 5_1_2 , __a : int = 5_1_2 , __a : Optional[int] = 5_0 , __a : Optional[Any] = 7.5 , __a : Optional[Any] = None , __a : List[str] = 1 , __a : Optional[Any] = 0.0 , __a : List[str] = None , __a : Union[str, Any] = None , __a : Tuple = "pil" , __a : Any = True , __a : Union[str, Any] = None , __a : Optional[Any] = 1 , **__a : Union[str, Any] , ) -> Optional[int]: return self.pipea( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) @torch.no_grad() def snake_case__ ( self : List[Any] , __a : Any , __a : Optional[int] = 5_1_2 , __a : int = 5_1_2 , __a : Optional[Any] = 5_0 , __a : str = 7.5 , __a : Any = None , __a : Union[str, Any] = 1 , __a : List[str] = 0.0 , __a : Union[str, Any] = None , __a : str = None , __a : Dict = "pil" , __a : str = True , __a : int = None , __a : int = 1 , **__a : Tuple , ) -> Optional[int]: __UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu" self.to(a_ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 __UpperCAmelCase = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.2 __UpperCAmelCase = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.3 __UpperCAmelCase = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get first result from Stable Diffusion Checkpoint v1.4 __UpperCAmelCase = self.textaimg_sda_a( prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
707
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]: return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy""" def snake_case__ ( self : Dict ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return image def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = '''bf16''' if fpaa else None __UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __a , subfolder='''unet''' , dtype=__a , revision=__a ) return model, params def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase , 
__UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__a , __a , atol=1e-2 )
654
0
"""AudioFolder: a folder-based dataset builder for audio classification."""
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
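The builder above backs the no-code audiofolder loader in datasets; a typical invocation (the directory path is hypothetical) looks like:

from datasets import load_dataset

# Layout: /path/to/folder/<label>/<clip>.wav -- class labels are inferred from
# the sub-folder names unless drop_labels=True is passed to the loader.
dataset = load_dataset("audiofolder", data_dir="/path/to/folder")
print(dataset["train"][0]["audio"])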
708
'''simple docstring''' import argparse import os import re import packaging.version __lowerCAmelCase : Optional[int] = "examples/" __lowerCAmelCase : Dict = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } __lowerCAmelCase : List[str] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } __lowerCAmelCase : int = "README.md" def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ): """simple docstring""" with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern] __UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ ) __UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ): """simple docstring""" for folder, directories, fnames in os.walk(UpperCamelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' ) def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not patch: update_version_in_examples(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = '''🤗 Transformers currently provides the following architectures''' __UpperCAmelCase = '''1. Want to contribute a new model?''' with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.readlines() # Find the start of the list. __UpperCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __UpperCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __UpperCAmelCase = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0] return packaging.version.parse(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Any=False ): """simple docstring""" __UpperCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __UpperCAmelCase = default_version.base_version elif patch: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = default_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = get_version() __UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __UpperCAmelCase = current_version.base_version # Check with the user we got that right. __UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = dev_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") __lowerCAmelCase : Tuple = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
654
0
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCAmelCase : int = logging.getLogger(__name__) __lowerCAmelCase : Union[str, Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __lowerCAmelCase : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SCREAMING_SNAKE_CASE_ : a_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) } , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class SCREAMING_SNAKE_CASE_ : a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The input training data file (a text file).'''} ) a_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
''' '''Very often splitting large files to smaller files can prevent the tokenizer from going out of memory''' ) } , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} ) a_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Whether or not to use whole word mask.'''} ) a_ = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) a_ = field( default=1 / 6 , metadata={ '''help''': ( '''Ratio of length of a span of masked tokens to surrounding context length for permutation language''' ''' modeling.''' ) } , ) a_ = field( default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} ) a_ = field( default=-1 , metadata={ '''help''': ( '''Optional input sequence length after tokenization. ''' '''The training dataset will be truncated in blocks of this size for training. ''' '''Default to the model max input length for single sentence inputs (take into account special tokens).''' ) } , ) a_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def lowerCAmelCase ( UpperCamelCase__ : DataTrainingArguments , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , ): """simple docstring""" def _dataset(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=_A , file_path=_A , block_size=args.block_size , ref_path=_A , ) return LineByLineTextDataset(tokenizer=_A , file_path=_A , block_size=args.block_size ) else: return TextDataset( tokenizer=_A , file_path=_A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_A , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(_A ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file.
Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome this.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: __UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: __UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another''' ''' script, save it, and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: __UpperCAmelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) __UpperCAmelCase = AutoModelWithLMHead.from_config(_A ) model.resize_token_embeddings(len(_A ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads.
They must be run using the''' ''' --mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: __UpperCAmelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: __UpperCAmelCase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __UpperCAmelCase = ( get_dataset(_A , tokenizer=_A , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __UpperCAmelCase = ( get_dataset(_A , tokenizer=_A , evaluate=_A , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __UpperCAmelCase = DataCollatorForPermutationLanguageModeling( tokenizer=_A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __UpperCAmelCase = DataCollatorForWholeWordMask( tokenizer=_A , mlm_probability=data_args.mlm_probability ) else: __UpperCAmelCase = DataCollatorForLanguageModeling( tokenizer=_A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __UpperCAmelCase = Trainer( model=_A , args=_A , data_collator=_A , train_dataset=_A , eval_dataset=_A , prediction_loss_only=_A , ) # Training if training_args.do_train: __UpperCAmelCase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=_A ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __UpperCAmelCase = trainer.evaluate() __UpperCAmelCase = math.exp(eval_output['''eval_loss'''] ) __UpperCAmelCase = {'''perplexity''': perplexity} __UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(_A , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , _A , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(_A ) return results def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] ): """simple docstring""" main() if __name__ == "__main__": main()
709
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : Tuple ): """simple docstring""" # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(UpperCamelCase__ ) __UpperCAmelCase = max(UpperCamelCase__ ) __UpperCAmelCase = min(UpperCamelCase__ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how many times a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with its predecessors. now, counting_arr[i] tells # us how many elements <= i are in the collection for i in range(1 , UpperCamelCase__ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to beginning, updating counting_arr for i in reversed(range(0 , UpperCamelCase__ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowerCAmelCase ( UpperCamelCase__ : Any ): """simple docstring""" return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt" __lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip() __lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")] print(counting_sort(unsorted))
654
0
'''simple docstring''' from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __lowerCAmelCase : List[str] = TypeVar("KEY") __lowerCAmelCase : str = TypeVar("VAL") @dataclass(frozen=_A , slots=_A ) class A ( Generic[KEY, VAL] ): a_ = 4_2 a_ = 4_2 class A ( _Item ): def __init__( self : List[str] ) -> None: super().__init__(UpperCamelCase__ , UpperCamelCase__ ) def __bool__( self : Union[str, Any] ) -> bool: return False __lowerCAmelCase : str = _DeletedItem() class A ( MutableMapping[KEY, VAL] ): def __init__( self : List[Any] , __a : int = 8 , __a : float = 0.7_5 ) -> None: __UpperCAmelCase = initial_block_size __UpperCAmelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __UpperCAmelCase = capacity_factor __UpperCAmelCase = 0 def snake_case__ ( self : List[str] , __a : KEY ) -> int: return hash(UpperCamelCase__ ) % len(self._buckets ) def snake_case__ ( self : int , __a : int ) -> int: return (ind + 1) % len(self._buckets ) def snake_case__ ( self : Union[str, Any] , __a : int , __a : KEY , __a : VAL ) -> bool: __UpperCAmelCase = self._buckets[ind] if not stored: __UpperCAmelCase = _Item(UpperCamelCase__ , UpperCamelCase__ ) self._len += 1 return True elif stored.key == key: __UpperCAmelCase = _Item(UpperCamelCase__ , UpperCamelCase__ ) return True else: return False def snake_case__ ( self : Union[str, Any] ) -> bool: __UpperCAmelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(UpperCamelCase__ ) def snake_case__ ( self : int ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __UpperCAmelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case__ ( self : int , __a : int ) -> None: __UpperCAmelCase = self._buckets __UpperCAmelCase = [None] * new_size __UpperCAmelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case__ ( self : Dict ) -> None: self._resize(len(self._buckets ) * 2 ) def snake_case__ ( self : Union[str, Any] ) -> None: self._resize(len(self._buckets ) // 2 ) def snake_case__ ( self : Dict , __a : KEY ) -> Iterator[int]: __UpperCAmelCase = self._get_bucket_index(UpperCamelCase__ ) for _ in range(len(self._buckets ) ): yield ind __UpperCAmelCase = self._get_next_ind(UpperCamelCase__ ) def snake_case__ ( self : Any , __a : KEY , __a : VAL ) -> None: for ind in self._iterate_buckets(UpperCamelCase__ ): if self._try_set(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): break def __setitem__( self : str , __a : KEY , __a : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(UpperCamelCase__ , UpperCamelCase__ ) def __delitem__( self : int , __a : KEY ) -> None: for ind in self._iterate_buckets(UpperCamelCase__ ): __UpperCAmelCase = self._buckets[ind] if item is None: raise KeyError(UpperCamelCase__ ) if item is _deleted: continue if item.key == key: __UpperCAmelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Tuple , __a : KEY ) -> VAL: for ind in self._iterate_buckets(UpperCamelCase__ ): __UpperCAmelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(UpperCamelCase__ ) def __len__( self : Optional[int] ) -> int: return self._len def __iter__( self : int ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : str ) -> str: __UpperCAmelCase = ''' ,'''.join( f"""{item.key}: 
{item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
710
'''simple docstring''' import requests from bs4 import BeautifulSoup def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ): """simple docstring""" __UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' ) __UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
654
0
'''simple docstring''' print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
711
'''simple docstring''' from __future__ import annotations from statistics import mean def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = [0] * no_of_processes __UpperCAmelCase = [0] * no_of_processes # Initialize remaining_time to burst_time. for i in range(UpperCamelCase__ ): __UpperCAmelCase = burst_time[i] __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = 0 # While processes are not completed, # a process whose arrival time has passed # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process, is executed. while completed != no_of_processes: __UpperCAmelCase = [] __UpperCAmelCase = -1 for i in range(UpperCamelCase__ ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: __UpperCAmelCase = i total_time += burst_time[target_process] completed += 1 __UpperCAmelCase = 0 __UpperCAmelCase = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ): """simple docstring""" __UpperCAmelCase = [0] * no_of_processes for i in range(UpperCamelCase__ ): __UpperCAmelCase = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("[TEST CASE 01]") __lowerCAmelCase : List[Any] = 4 __lowerCAmelCase : List[Any] = [2, 5, 3, 7] __lowerCAmelCase : Tuple = [0, 0, 0, 0] __lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __lowerCAmelCase : Dict = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") for i, process_id in enumerate(list(range(1, 5))): print( F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
654
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : Dict = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = {"vocab_file": "sentencepiece.model"} __lowerCAmelCase : Any = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } __lowerCAmelCase : str = { "google/rembert": 256, } class A ( UpperCAmelCase ): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Any , __a : str , __a : Optional[Any]=False , __a : Optional[int]=True , __a : Dict=True , __a : int="[CLS]" , __a : List[Any]="[SEP]" , __a : str="[UNK]" , __a : Any="[SEP]" , __a : List[Any]="[PAD]" , __a : Dict="[CLS]" , __a : Union[str, Any]="[MASK]" , **__a : Tuple , ) -> Optional[int]: super().__init__( do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , ) __UpperCAmelCase = do_lower_case __UpperCAmelCase = remove_space __UpperCAmelCase = keep_accents __UpperCAmelCase = vocab_file __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(__a ) @property def snake_case__ ( self : str ) -> Dict: return len(self.sp_model ) def snake_case__ ( self : Optional[Any] ) -> Any: __UpperCAmelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase = self.__dict__.copy() __UpperCAmelCase = None return state def __setstate__( self : Tuple , __a : int ) -> str: __UpperCAmelCase = d __UpperCAmelCase = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , __a : Union[str, Any] , __a : Dict=False ) -> Union[str, Any]: __UpperCAmelCase = self.sp_model.EncodeAsPieces(__a ) return pieces def snake_case__ ( self : Dict , __a : List[str] ) -> Union[str, Any]: return self.sp_model.PieceToId(__a ) def snake_case__ ( self : Tuple , __a : List[Any] ) -> Dict: return self.sp_model.IdToPiece(__a ) def snake_case__ ( self : Optional[Any] , __a : Tuple ) -> Tuple: __UpperCAmelCase = self.sp_model.decode_pieces(__a ) return out_string def snake_case__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] = None ) -> List[int]: __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , __a : Any , __a : Optional[Any] = None , __a : Optional[int] = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] def snake_case__ ( self : Optional[Any] , __a : Tuple , __a : Tuple = None ) -> List[int]: __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + 
token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self : Union[str, Any] , __a : List[Any] , __a : List[Any] = None ) -> Tuple[str]: if not os.path.isdir(__a ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__a ) ) return __UpperCAmelCase = os.path.join( __a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) return (out_vocab_file,)
712
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]: requires_backends(cls , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : int , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict: requires_backends(cls , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] 
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = 
['''torch'''] def __init__( self : Any , *__a : str , **__a : Any ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str: requires_backends(cls 
, ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Any , **__a : int ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple: requires_backends(cls , ['''torch'''] ) 
@classmethod def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int: requires_backends(self , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]: requires_backends(cls , ['''torch'''] )
654
0
'''simple docstring''' import math def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" if ( not isinstance(lowerCamelCase_ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) # real power (W) = apparent power (VA) * power factor return apparent_power * power_factor def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): """simple docstring""" if ( not isinstance(lowerCamelCase_ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) # reactive power (VAR) = apparent power (VA) * sqrt(1 - power_factor**2) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
713
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
654
0
from __future__ import annotations import math def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = str(A__ ) __UpperCAmelCase = [n] for i in range(1 , len(A__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" if len(str(A__ ) ) > 3: if not is_prime(int(str(A__ )[-3:] ) ) or not is_prime(int(str(A__ )[:3] ) ): return False return True def lowerCAmelCase ( UpperCamelCase__ : int = 1_1 ): """simple docstring""" __UpperCAmelCase = [] __UpperCAmelCase = 1_3 while len(A__ ) != count: if validate(A__ ): __UpperCAmelCase = list_truncated_nums(A__ ) if all(is_prime(A__ ) for i in list_nums ): list_truncated_primes.append(A__ ) num += 2 return list_truncated_primes def lowerCAmelCase ( ): """simple docstring""" return sum(compute_truncated_primes(1_1 ) ) if __name__ == "__main__": print(F"""{sum(compute_truncated_primes(11)) = }""")
714
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Optional[Any] = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A : @staticmethod def snake_case__ ( *__a : Optional[Any] , **__a : Union[str, Any] ) -> List[Any]: pass @is_pipeline_test @require_vision @require_torch class A ( unittest.TestCase ): a_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def snake_case__ ( self : List[str] , __a : int , __a : Tuple , __a : Any ) -> Optional[int]: __UpperCAmelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) __UpperCAmelCase = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def snake_case__ ( self : Optional[Any] , __a : Optional[Any] , __a : List[Any] ) -> List[Any]: __UpperCAmelCase = object_detector(examples[0] , threshold=0.0 ) __UpperCAmelCase = len(__a ) self.assertGreater(__a , 0 ) self.assertEqual( __a , [ { '''score''': ANY(__a ), '''label''': ANY(__a ), '''box''': {'''xmin''': ANY(__a ), '''ymin''': ANY(__a ), '''xmax''': ANY(__a ), '''ymax''': ANY(__a )}, } for i in range(__a ) ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def snake_case__ ( self : List[str] ) -> str: pass @require_torch def snake_case__ ( self : int ) -> Tuple: __UpperCAmelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) __UpperCAmelCase = object_detector( '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}}, ] , ) __UpperCAmelCase = object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {'''score''': 0.7_2_3_5, '''label''': 
'''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}}, {'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}}, {'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}}, {'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}}, {'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}}, ] ] , ) @require_torch @slow def snake_case__ ( self : int ) -> List[str]: __UpperCAmelCase = pipeline('''zero-shot-object-detection''' ) __UpperCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}}, ] , ) __UpperCAmelCase = object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ] , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}}, ], [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, 
'''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}}, {'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}}, {'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}}, ], ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def snake_case__ ( self : Any ) -> str: pass @require_torch @slow def snake_case__ ( self : Union[str, Any] ) -> Tuple: __UpperCAmelCase = 0.2 __UpperCAmelCase = pipeline('''zero-shot-object-detection''' ) __UpperCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__a , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}}, {'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}}, ] , ) @require_torch @slow def snake_case__ ( self : Any ) -> Union[str, Any]: __UpperCAmelCase = 2 __UpperCAmelCase = pipeline('''zero-shot-object-detection''' ) __UpperCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__a , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}}, {'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}}, ] , )
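The file above exercises the `zero-shot-object-detection` pipeline end to end; for reference, a minimal standalone usage sketch follows. It assumes `transformers` with a torch backend; the image URL and candidate labels are the same fixtures the slow tests use, and the default checkpoint is downloaded on first use.

# Minimal usage sketch of the pipeline under test above. The call signature
# (candidate_labels, threshold) mirrors the slow tests in this file.
from transformers import pipeline

detector = pipeline('zero-shot-object-detection')
predictions = detector(
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    candidate_labels=['cat', 'remote', 'couch'],
    threshold=0.2,  # keep only reasonably confident boxes
)
for pred in predictions:
    box = pred['box']
    print(f"{pred['label']}: {pred['score']:.4f} at ({box['xmin']}, {box['ymin']})")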
715
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ): """simple docstring""" __UpperCAmelCase = {} if train_file is not None: __UpperCAmelCase = [train_file] if eval_file is not None: __UpperCAmelCase = [eval_file] if test_file is not None: __UpperCAmelCase = [test_file] __UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ ) __UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() ) __UpperCAmelCase = features_name.pop(UpperCamelCase__ ) __UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) ) __UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )} __UpperCAmelCase = tokenizer.model_input_names __UpperCAmelCase = {} if len(UpperCamelCase__ ) == 1: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , ) elif len(UpperCamelCase__ ) == 2: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if 
datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCAmelCase : List[Any] = logging.getLogger(__name__) @dataclass class A : a_ = field(metadata={'''help''': '''Which column contains the label'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} ) a_ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : a_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def lowerCAmelCase ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict: __UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __UpperCAmelCase = TFTrainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __UpperCAmelCase = trainer.evaluate() __UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(UpperCamelCase__ ) return results if __name__ == "__main__": main()
654
0
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __lowerCAmelCase : str = logging.get_logger(__name__) def lowerCAmelCase ( UpperCamelCase__ : str=None , UpperCamelCase__ : str=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=a__ ) @dataclass class A : a_ = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) a_ = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) a_ = list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Benchmark training of model'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Verbose memory tracing'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) a_ = field( default=UpperCAmelCase , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Trace memory line by line'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Save result to a CSV file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Save all print statements in a log file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Whether to print environment information'''} ) a_ = field( default=UpperCAmelCase , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) a_ = field( default=F"""inference_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) a_ = field( default=F"""inference_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) a_ = field( default=F"""train_time_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) a_ = field( default=F"""train_memory_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) a_ = field( default=F"""env_info_{round(time() )}.csv""" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) a_ = field( default=F"""log_{round(time() )}.csv""" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) a_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) a_ = field( default=UpperCAmelCase , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def snake_case__ ( self : List[Any] ) -> List[str]: warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , lowercase_ , ) def snake_case__ ( self : Tuple ) -> Tuple: return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def snake_case__ ( self : Any ) -> List[str]: if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def snake_case__ ( self : Any ) -> Tuple: if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
716
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class A : def __init__( self : List[Any] , __a : Any , ) -> Dict: __UpperCAmelCase = parent __UpperCAmelCase = 1_3 __UpperCAmelCase = 7 __UpperCAmelCase = True __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = 9_9 __UpperCAmelCase = 3_2 __UpperCAmelCase = 2 __UpperCAmelCase = 4 __UpperCAmelCase = 3_7 __UpperCAmelCase = '''gelu''' __UpperCAmelCase = 0.1 __UpperCAmelCase = 0.1 __UpperCAmelCase = 5_1_2 __UpperCAmelCase = 1_6 __UpperCAmelCase = 2 __UpperCAmelCase = 0.0_2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 __UpperCAmelCase = None def snake_case__ ( self : Optional[int] ) -> Dict: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any: __UpperCAmelCase = TFDistilBertModel(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) __UpperCAmelCase = [input_ids, input_mask] __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int: __UpperCAmelCase = TFDistilBertForMaskedLM(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict: __UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a ) 
__UpperCAmelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForSequenceClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str: __UpperCAmelCase = self.num_choices __UpperCAmelCase = TFDistilBertForMultipleChoice(__a ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForTokenClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : str ) -> Any: __UpperCAmelCase = self.prepare_config_and_inputs() ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): a_ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) a_ = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) a_ = False a_ = False def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = TFDistilBertModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 ) def snake_case__ ( self : List[Any] ) -> Optional[int]: self.config_tester.run_common_tests() def snake_case__ ( self : Any ) -> str: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__a ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__a ) def snake_case__ ( self : Union[str, Any] ) -> Any: __UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__a ) def snake_case__ ( self : Optional[Any] ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a ) def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__a ) @slow def snake_case__ ( self : Dict ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCAmelCase = TFDistilBertModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_tf class A ( unittest.TestCase ): @slow def snake_case__ ( self : int ) -> Dict: __UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase = model(__a )[0] __UpperCAmelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , __a ) __UpperCAmelCase = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
654
0
'''simple docstring'''
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place using a randomly chosen pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
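Because the pivot is drawn at random, a fixed expected output is less useful than a property check; the sketch below (an addition, not part of the original module) sorts random arrays in place with `quick_sort_random` and compares against Python's built-in `sorted`.

import random

def check_quick_sort_random(trials: int = 100) -> None:
    # Property test: the in-place result must match sorted() on random input.
    for _ in range(trials):
        arr = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
        expected = sorted(arr)
        quick_sort_random(arr, 0, len(arr))
        assert arr == expected, f'mismatch: {arr} != {expected}'
    print(f'{trials} trials passed')

check_quick_sort_random()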
717
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ASTForAudioClassification',
        'ASTModel',
        'ASTPreTrainedModel',
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
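The `_LazyModule` registration above defers the torch- and speech-heavy imports until a symbol is actually requested. A hedged illustration, assuming a recent `transformers` release that exports these names at top level:

# Importing only the config does not pull in the torch modeling code, thanks
# to the lazy-module indirection above.
from transformers import ASTConfig

config = ASTConfig()  # default Audio Spectrogram Transformer hyperparameters
print(config.hidden_size)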
654
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=8 ): """simple docstring""" __UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class A ( UpperCAmelCase ): def __init__( self : Optional[Any] , __a : UNetaDConditionModel , __a : DDPMScheduler , __a : VQModel , ) -> Tuple: super().__init__() self.register_modules( unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , ) __UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def snake_case__ ( self : int , __a : List[Any] , __a : Dict , __a : Optional[Any] , __a : Tuple , __a : Tuple , __a : List[Any] ) -> int: if latents is None: __UpperCAmelCase = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) __UpperCAmelCase = latents.to(UpperCAmelCase__ ) __UpperCAmelCase = latents * scheduler.init_noise_sigma return latents def snake_case__ ( self : int , __a : List[str]=0 ) -> str: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) __UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , __a : int=0 ) -> str: if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) __UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: __UpperCAmelCase = 
cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ ) # We'll offload the last model manually. __UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def snake_case__ ( self : List[Any] ) -> int: if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase__ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase__ ) def __call__( self : Any , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : int = 5_1_2 , __a : int = 5_1_2 , __a : int = 1_0_0 , __a : float = 4.0 , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , ) -> Optional[Any]: __UpperCAmelCase = self._execution_device __UpperCAmelCase = guidance_scale > 1.0 if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __UpperCAmelCase = torch.cat(UpperCAmelCase__ , dim=0 ) __UpperCAmelCase = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __UpperCAmelCase = torch.cat(UpperCAmelCase__ , dim=0 ) if do_classifier_free_guidance: __UpperCAmelCase = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 ) __UpperCAmelCase = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 ) __UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ ) self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ ) __UpperCAmelCase = self.scheduler.timesteps __UpperCAmelCase = self.unet.config.in_channels __UpperCAmelCase = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor ) # create initial latent __UpperCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase = {'''image_embeds''': image_embeds} __UpperCAmelCase = self.unet( sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0] if do_classifier_free_guidance: __UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) __UpperCAmelCase = noise_pred.chunk(2 ) __UpperCAmelCase = variance_pred.chunk(2 ) __UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase = self.scheduler.step( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0] # post-processing __UpperCAmelCase 
= self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: __UpperCAmelCase = image * 0.5 + 0.5 __UpperCAmelCase = image.clamp(0 , 1 ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __UpperCAmelCase = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
718
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = 'bert-generation'

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
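As a quick sanity check of the defaults above, the sketch below instantiates the config and round-trips it through its dict form; it relies only on the serialization helpers that `PretrainedConfig` provides.

config = BertGenerationConfig(num_hidden_layers=12)  # override one default
assert config.hidden_size == 1024
assert config.num_hidden_layers == 12

# PretrainedConfig supplies to_dict/from_dict out of the box.
restored = BertGenerationConfig.from_dict(config.to_dict())
assert restored.num_hidden_layers == 12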
654
0
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __lowerCAmelCase : Any = logging.getLogger(__name__) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=UpperCamelCase__ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=UpperCamelCase__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=UpperCamelCase__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=UpperCamelCase__ , default=1_0_0_0 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=UpperCamelCase__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=UpperCamelCase__ , default=5_1_2 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=UpperCamelCase__ , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) __UpperCAmelCase = parser.parse_args() return args def lowerCAmelCase ( UpperCamelCase__ : Tuple ): """simple docstring""" def fn(UpperCamelCase__ : Optional[int] ): return tokenizer(examples['''text'''] ) return fn def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = [] for i in range(len(tokenized_data['''input_ids'''] ) ): __UpperCAmelCase = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } __UpperCAmelCase = tf.train.Features(feature=UpperCamelCase__ ) __UpperCAmelCase = tf.train.Example(features=UpperCamelCase__ ) __UpperCAmelCase = example.SerializeToString() records.append(UpperCamelCase__ ) return records def lowerCAmelCase ( UpperCamelCase__ : Tuple ): """simple docstring""" __UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: __UpperCAmelCase = min(len(UpperCamelCase__ ) , args.limit ) __UpperCAmelCase = dataset.select(range(UpperCamelCase__ ) ) print(f"""Limiting the dataset to {args.limit} entries.""" ) __UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) __UpperCAmelCase = os.path.join(args.output_dir , args.split ) if not os.path.exists(UpperCamelCase__ ): os.makedirs(UpperCamelCase__ ) else: __UpperCAmelCase = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. __UpperCAmelCase = tokenize_function(UpperCamelCase__ ) __UpperCAmelCase = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(UpperCamelCase__ : Optional[int] ): # Concatenate all texts. __UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()} __UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 __UpperCAmelCase = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. __UpperCAmelCase = { k: [t[i : i + args.max_length] for i in range(0 , UpperCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result __UpperCAmelCase = dataset_tokenized.map(UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=1_0_0_0 , num_proc=4 ) __UpperCAmelCase = 0 __UpperCAmelCase = 0 for shard in range(0 , len(UpperCamelCase__ ) , args.shard_size ): __UpperCAmelCase = grouped_dataset[shard : shard + args.shard_size] __UpperCAmelCase = len(dataset_snapshot['''input_ids'''] ) __UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""dataset-{shard_count}-{records_containing}.tfrecord""" ) __UpperCAmelCase = get_serialized_examples(UpperCamelCase__ ) with tf.io.TFRecordWriter(UpperCamelCase__ ) as out_file: for i in range(len(UpperCamelCase__ ) ): __UpperCAmelCase = serialized_examples[i] out_file.write(UpperCamelCase__ ) print('''Wrote file {} containing {} records'''.format(UpperCamelCase__ , UpperCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f: print(f"""Total {args.split} records: {total_records}""" , file=UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = parse_args() main(args)
719
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# Speed of light (m/s)
c = 299_792_458

# Symbols for the four coordinates of an event
ct, x, y, z = symbols('ct x y z')


def beta(velocity: float) -> float:
    """Return v/c, validating that the speed is physical."""
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!')
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Lorentz boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to an event; defaults to the symbolic four-vector."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # symbolic four-vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of a symbolic four-vector:
    four_vector = transform(29_979_245)
    print('Example of four vector: ')
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'\n{numerical_vector}')
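A property worth checking numerically is that the boost preserves the Minkowski interval (ct)^2 - x^2 - y^2 - z^2; the sketch below verifies this for the matrix built above at an arbitrary sub-light speed.

import numpy as np

def minkowski_interval(event: np.ndarray) -> float:
    # Signature (+, -, -, -): (ct)^2 - x^2 - y^2 - z^2
    return event[0] ** 2 - event[1] ** 2 - event[2] ** 2 - event[3] ** 2

velocity = 0.6 * c  # arbitrary speed below c
event = np.array([4.0, 1.0, 2.0, 3.0])  # (ct, x, y, z)
boosted = transformation_matrix(velocity) @ event

# A Lorentz boost leaves the interval invariant (up to float rounding).
assert np.isclose(minkowski_interval(event), minkowski_interval(boosted))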
654
0
'''simple docstring'''


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # mark every multiple of p starting at p*p as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input('Enter a positive integer: ').strip())
    print(prime_sieve_eratosthenes(user_num))
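A quick cross-check of the sieve against naive trial division, runnable as-is after the definitions above:

def is_prime_trial_division(n: int) -> bool:
    # Reference implementation used only to validate the sieve.
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

limit = 100
sieve_primes = prime_sieve_eratosthenes(limit)
trial_primes = [n for n in range(2, limit + 1) if is_prime_trial_division(n)]
assert sieve_primes == trial_primes
print(sieve_primes[:10])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]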
720
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : Any = tuple[int, int] class A : def __init__( self : Optional[int] ) -> int: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : Optional[Any] ) -> List[Any]: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Dict ) -> Optional[int]: return len(self.elements ) == 0 def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__a ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : int , __a : Any ) -> int: if item in self.set: self.set.remove(__a ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : List[str] ) -> Dict: return self.elements[0][1] def snake_case__ ( self : Any ) -> List[str]: ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__a ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # euclidean distance __UpperCAmelCase = np.array(UpperCamelCase__ ) __UpperCAmelCase = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): __UpperCAmelCase = '''*''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: 
print(UpperCamelCase__ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ): """simple docstring""" for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Dict = make_common_ground() __lowerCAmelCase : int = blocks_blk # hyper parameters __lowerCAmelCase : Dict = 1 __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Union[str, Any] = 20 __lowerCAmelCase : Any = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[Any] = (0, 0) __lowerCAmelCase : Any = (n - 1, n - 1) __lowerCAmelCase : Optional[int] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
654
0
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : def __init__( self : Dict , __a : Dict , __a : Tuple=1_3 , __a : Tuple=3_2 , __a : Any=3 , __a : Dict=4 , __a : List[str]=[1_0, 2_0, 3_0, 4_0] , __a : List[Any]=[2, 2, 3, 2] , __a : Optional[Any]=True , __a : int=True , __a : List[str]=3_7 , __a : Optional[int]="gelu" , __a : Dict=1_0 , __a : Any=0.0_2 , __a : Union[str, Any]=["stage2", "stage3", "stage4"] , __a : Tuple=[2, 3, 4] , __a : Union[str, Any]=None , ) -> str: __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = num_channels __UpperCAmelCase = num_stages __UpperCAmelCase = hidden_sizes __UpperCAmelCase = depths __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = intermediate_size __UpperCAmelCase = hidden_act __UpperCAmelCase = num_labels __UpperCAmelCase = initializer_range __UpperCAmelCase = out_features __UpperCAmelCase = out_indices __UpperCAmelCase = scope def snake_case__ ( self : Optional[Any] ) -> Dict: __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def snake_case__ ( self : Any , __a : Optional[Any] , __a : Any , __a : List[str] ) -> Union[str, Any]: __UpperCAmelCase = ConvNextVaModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() __UpperCAmelCase = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def snake_case__ ( self : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any] , __a : List[Any] ) -> Any: __UpperCAmelCase = ConvNextVaForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() __UpperCAmelCase = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : int , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] ) -> Any: __UpperCAmelCase = 
ConvNextVaBackbone(config=snake_case_ ) model.to(snake_case_ ) model.eval() __UpperCAmelCase = model(snake_case_ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase = None __UpperCAmelCase = ConvNextVaBackbone(config=snake_case_ ) model.to(snake_case_ ) model.eval() __UpperCAmelCase = model(snake_case_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class A ( _snake_case , _snake_case , unittest.TestCase ): a_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) a_ = ( {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False a_ = False def snake_case__ ( self : List[str] ) -> Optional[int]: __UpperCAmelCase = ConvNextVaModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 ) def snake_case__ ( self : List[Any] ) -> str: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def snake_case__ ( self : List[Any] ) -> str: pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def snake_case__ ( self : str ) -> Union[str, Any]: pass def snake_case__ ( self : Union[str, Any] ) -> str: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase = True if model_class.__name__ in [ *get_values(snake_case_ ), *get_values(snake_case_ ), ]: continue __UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() __UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) __UpperCAmelCase = 
model(**snake_case_ ).loss loss.backward() def snake_case__ ( self : Union[str, Any] ) -> Dict: if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase = False __UpperCAmelCase = True if ( model_class.__name__ in [*get_values(snake_case_ ), *get_values(snake_case_ )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) __UpperCAmelCase = model(**snake_case_ ).loss loss.backward() def snake_case__ ( self : Dict ) -> str: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = model_class(snake_case_ ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) def snake_case__ ( self : Optional[Any] ) -> List[str]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def snake_case__ ( self : int ) -> Optional[Any]: def check_hidden_states_output(__a : Dict , __a : Any , __a : List[str] ): __UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) __UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def snake_case__ ( self : Tuple ) -> Optional[int]: for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = ConvNextVaModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def snake_case__ ( self : int ) -> int: return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def snake_case__ ( self : str ) -> Optional[Any]: __UpperCAmelCase = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(snake_case_ ) __UpperCAmelCase = self.default_image_processor 
__UpperCAmelCase = prepare_img() __UpperCAmelCase = preprocessor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) # forward pass with torch.no_grad(): __UpperCAmelCase = model(**snake_case_ ) # verify the logits __UpperCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case_ ) __UpperCAmelCase = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
721
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __lowerCAmelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __lowerCAmelCase : Optional[int] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( 
"zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): __UpperCAmelCase = None if _re_tf_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = tf_models __UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = flax_models __UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = pt_models __UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCAmelCase = True break # Try again after removing the last word in the name __UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] ) __UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCAmelCase = list(UpperCamelCase__ ) all_models.sort() __UpperCAmelCase = {'''model_type''': all_models} __UpperCAmelCase = [pt_models[t] for t in all_models] __UpperCAmelCase = [tf_models[t] for t in all_models] __UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCAmelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCAmelCase = '''AutoTokenizer''' __UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] __UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names __UpperCAmelCase = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" __UpperCAmelCase = get_frameworks_table() __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) __UpperCAmelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ ) __UpperCAmelCase = Dataset.from_json(UpperCamelCase__ ) __UpperCAmelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(UpperCamelCase__ ) ) } __UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCAmelCase = sorted(table.keys() ) __UpperCAmelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __UpperCAmelCase = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __UpperCAmelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS __UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: __UpperCAmelCase = pipeline_tasks[key]['''pt'''] if isinstance(UpperCamelCase__ , (list, tuple) ): __UpperCAmelCase = model[0] __UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ''', '''.join(UpperCamelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __lowerCAmelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
654
0
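The metadata script in this row matches model class names against config names by first splitting them on camel-case boundaries. A self-contained sketch of that split, reusing the same regex pattern, follows; the helper and constant names here are ours.

import re

# Same boundary regex as the metadata script in this row: a lazy run of
# characters up to a lower->upper transition, an acronym boundary
# (upper followed by upper+lower), or end of string.
_CAMEL_RE = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")


def camel_case_split(identifier):
    """Split a model class name on camel-case word boundaries."""
    return [m.group(0) for m in _CAMEL_RE.finditer(identifier)]


# camel_case_split("TFConvNextModel") -> ['TF', 'Conv', 'Next', 'Model']
# camel_case_split("FlaxBertModel")   -> ['Flax', 'Bert', 'Model']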
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class A ( lowercase__ , lowercase__ , unittest.TestCase ): a_ = StableDiffusionPanoramaPipeline a_ = TEXT_TO_IMAGE_PARAMS a_ = TEXT_TO_IMAGE_BATCH_PARAMS a_ = TEXT_TO_IMAGE_IMAGE_PARAMS a_ = TEXT_TO_IMAGE_IMAGE_PARAMS def snake_case__ ( self : Tuple ) -> str: torch.manual_seed(0 ) __UpperCAmelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) __UpperCAmelCase = DDIMScheduler() torch.manual_seed(0 ) __UpperCAmelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) __UpperCAmelCase = CLIPTextModel(UpperCAmelCase__ ) __UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def snake_case__ ( self : Tuple , __a : Any , __a : Any=0 ) -> List[Any]: __UpperCAmelCase = torch.manual_seed(UpperCAmelCase__ ) __UpperCAmelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. 
'''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def snake_case__ ( self : Optional[Any] ) -> Any: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe(**UpperCAmelCase__ ).images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCAmelCase = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__ ( self : Dict ) -> Union[str, Any]: super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 ) def snake_case__ ( self : Tuple ) -> Optional[Any]: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) __UpperCAmelCase = '''french fries''' __UpperCAmelCase = sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ ) __UpperCAmelCase = output.images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCAmelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__ ( self : Dict ) -> Optional[int]: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe(**UpperCAmelCase__ , view_batch_size=2 ) __UpperCAmelCase = output.images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCAmelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__ ( self : str ) -> str: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' ) __UpperCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe(**UpperCAmelCase__ ).images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCAmelCase = 
np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__ ( self : Tuple ) -> str: __UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = PNDMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase__ ) __UpperCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __UpperCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ ) __UpperCAmelCase = sd_pipe(**UpperCAmelCase__ ).images __UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __UpperCAmelCase = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def snake_case__ ( self : int ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : int , __a : int=0 ) -> List[Any]: __UpperCAmelCase = torch.manual_seed(UpperCAmelCase__ ) __UpperCAmelCase = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def snake_case__ ( self : Any ) -> Dict: __UpperCAmelCase = '''stabilityai/stable-diffusion-2-base''' __UpperCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' ) __UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing() __UpperCAmelCase = self.get_inputs() __UpperCAmelCase = pipe(**UpperCAmelCase__ ).images __UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __UpperCAmelCase = np.array( [ 0.3_6_9_6_8_3_9_2, 0.2_7_0_2_5_3_7_2, 0.3_2_4_4_6_7_6_6, 0.2_8_3_7_9_3_8_7, 0.3_6_3_6_3_2_7_4, 0.3_0_7_3_3_3_4_7, 0.2_7_1_0_0_0_2_7, 0.2_7_0_5_4_1_2_5, 0.2_5_5_3_6_0_9_6, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def snake_case__ ( self : Any ) -> List[str]: __UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase__ ) __UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing() __UpperCAmelCase = self.get_inputs() __UpperCAmelCase = pipe(**UpperCAmelCase__ ).images __UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __UpperCAmelCase = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def snake_case__ ( self : Optional[int] ) -> int: __UpperCAmelCase = 0 def callback_fn(__a : int , __a : int , __a : torch.FloatTensor ) -> None: __UpperCAmelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __UpperCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __UpperCAmelCase = latents[0, -3:, -3:, -1] 
__UpperCAmelCase = np.array( [ 0.1_8_6_8_1_8_6_9, 0.3_3_9_0_7_8_1_6, 0.5_3_6_1_2_7_6, 0.1_4_4_3_2_8_6_5, -0.0_2_8_5_6_6_1_1, -0.7_3_9_4_1_1_2_3, 0.2_3_3_9_7_9_8_7, 0.4_7_3_2_2_6_8_2, -0.3_7_8_2_3_1_6_4, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: __UpperCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __UpperCAmelCase = latents[0, -3:, -3:, -1] __UpperCAmelCase = np.array( [ 0.1_8_5_3_9_6_4_5, 0.3_3_9_8_7_2_4_8, 0.5_3_7_8_5_5_9, 0.1_4_4_3_7_1_4_2, -0.0_2_4_5_5_2_6_1, -0.7_3_3_8_3_1_7, 0.2_3_9_9_0_7_5_5, 0.4_7_3_5_6_2_7_2, -0.3_7_8_6_5_0_5, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 __UpperCAmelCase = False __UpperCAmelCase = '''stabilityai/stable-diffusion-2-base''' __UpperCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' ) __UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ ) __UpperCAmelCase = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing() __UpperCAmelCase = self.get_inputs() pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def snake_case__ ( self : int ) -> Any: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCAmelCase = '''stabilityai/stable-diffusion-2-base''' __UpperCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' ) __UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ ) __UpperCAmelCase = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __UpperCAmelCase = self.get_inputs() __UpperCAmelCase = pipe(**UpperCAmelCase__ ) __UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 1_0**9
700
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase : Optional[int] = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class A ( unittest.TestCase ): def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple: __UpperCAmelCase = None __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) __UpperCAmelCase = os.path.abspath('''examples''' ) for item in os.listdir(__a ): if item not in EXCLUDE_EXAMPLES: __UpperCAmelCase = os.path.join(__a , __a ) if os.path.isfile(__a ) and ".py" in item_path: with self.subTest( tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ): __UpperCAmelCase = compare_against_test( os.path.join(__a , __a ) , __a , __a , __a ) __UpperCAmelCase = '''\n'''.join(__a ) if special_strings is not None: for string in special_strings: __UpperCAmelCase = diff.replace(__a , '''''' ) self.assertEqual(__a , '''''' ) def snake_case__ ( self : Optional[Any] ) -> str: self.one_complete_example('''complete_nlp_example.py''' , __a ) self.one_complete_example('''complete_nlp_example.py''' , __a ) def snake_case__ ( self : List[str] ) -> Tuple: __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) __UpperCAmelCase = [ ''' ''' * 1_6 + '''{\n\n''', ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 2_0 + '''"epoch": epoch,\n\n''', ''' ''' * 1_6 + '''},\n\n''', ''' ''' * 1_6 + '''step=epoch,\n''', ''' ''' * 1_2, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class A ( UpperCAmelCase ): a_ = False @classmethod def snake_case__ ( cls : Tuple ) -> str: super().setUpClass() __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) __UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Dict ) -> int: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) if torch.cuda.is_available(): __UpperCAmelCase = torch.cuda.device_count() else: __UpperCAmelCase = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) else: self.assertIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) @slow def snake_case__ ( self : Any ) -> Optional[Any]: __UpperCAmelCase = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) __UpperCAmelCase = re.findall('''({.+})''' , __a ) __UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1] __UpperCAmelCase = ast.literal_eval(__a ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case__ ( self : Dict ) -> int: __UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: __UpperCAmelCase = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case__ ( self : Tuple ) -> Optional[Any]: __UpperCAmelCase = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
654
0
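Both test files in this row assert on a small corner slice of the output rather than the full tensor. A sketch of that idiom as a reusable helper is below; the helper name and signature are assumptions, not part of either test suite.

import numpy as np


def assert_slice_close(image, expected_slice, atol=1e-2):
    """Compare the bottom-right 3x3 patch of the last channel against a
    stored 9-value reference, the way the pipeline tests above do."""
    actual = np.asarray(image)[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(actual - np.asarray(expected_slice).flatten()).max()
    assert max_diff < atol, f"max abs diff {max_diff:.4f} exceeds atol={atol}"


# Usage with a (1, 64, 64, 3) output array and a 9-element reference slice:
# assert_slice_close(images, expected_slice)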
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
    """simple docstring"""
    assert column_title.isupper()
    __UpperCAmelCase = 0
    __UpperCAmelCase = len(_lowerCAmelCase ) - 1
    __UpperCAmelCase = 0
    while index >= 0:
        __UpperCAmelCase = (ord(column_title[index] ) - 6_4) * pow(2_6 , _lowerCAmelCase )
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
701
'''simple docstring'''

import glob
import os
import random
from string import ascii_lowercase, digits

import cva

__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1  # (0 is vertical, 1 is horizontal)


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
    print('''Processing...''' )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    for index, image in enumerate(UpperCamelCase__ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCAmelCase = random_chars(3_2 )
        __UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
        __UpperCAmelCase = []
        for anno in new_annos[index]:
            __UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(UpperCamelCase__ )
        with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
    """simple docstring"""
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
        __UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(UpperCamelCase__ ) as in_file:
            __UpperCAmelCase = in_file.readlines()
        __UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
        __UpperCAmelCase = []
        for obj_list in obj_lists:
            __UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(UpperCamelCase__ )
        labels.append(UpperCamelCase__ )
    return img_paths, labels


def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
    """simple docstring"""
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    for idx in range(len(UpperCamelCase__ ) ):
        __UpperCAmelCase = []
        __UpperCAmelCase = img_list[idx]
        path_list.append(UpperCamelCase__ )
        __UpperCAmelCase = anno_list[idx]
        __UpperCAmelCase = cva.imread(UpperCamelCase__ )
        if flip_type == 1:
            __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
            for bbox in img_annos:
                __UpperCAmelCase = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
            for bbox in img_annos:
                __UpperCAmelCase = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(UpperCamelCase__ )
        new_imgs_list.append(UpperCamelCase__ )
    return new_imgs_list, new_annos_lists, path_list


def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    __UpperCAmelCase = ascii_lowercase + digits
    return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )


if __name__ == "__main__":
    main()
    print("DONE ✅")
654
0
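The code column of this row evaluates an Excel column title as a base-26 numeral with 'A' = 1 through 'Z' = 26. Because the row's local names are obfuscated, here is an equivalent readable sketch with worked values; the function name is ours.

def excel_column_number(column_title):
    """Base-26 evaluation of an Excel column title: 'A' -> 1, ..., 'Z' -> 26."""
    assert column_title.isupper()
    answer = 0
    for char in column_title:
        # Shift into 1..26 (ord('A') == 65) and accumulate positionally.
        answer = answer * 26 + (ord(char) - 64)
    return answer


# excel_column_number("A") == 1; excel_column_number("AB") == 28;
# excel_column_number("ZZ") == 702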
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : int = tuple[int, int] class A : def __init__( self : List[str] ) -> Optional[Any]: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : List[str] ) -> int: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Union[str, Any] ) -> List[Any]: return len(self.elements ) == 0 def snake_case__ ( self : Dict , __a : Dict , __a : Any ) -> Union[str, Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__lowerCamelCase ) else: # update # print("update", item) __UpperCAmelCase = [] (__UpperCAmelCase) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) (__UpperCAmelCase) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : Any , __a : Optional[int] ) -> Optional[int]: if item in self.set: self.set.remove(__lowerCamelCase ) __UpperCAmelCase = [] (__UpperCAmelCase) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) (__UpperCAmelCase) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : Dict ) -> List[Any]: return self.elements[0][1] def snake_case__ ( self : Dict ) -> Any: (__UpperCAmelCase) = heapq.heappop(self.elements ) self.set.remove(__lowerCamelCase ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" __UpperCAmelCase = np.array(lowerCamelCase_ ) __UpperCAmelCase = np.array(lowerCamelCase_ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" return consistent_heuristic(lowerCamelCase_ , lowerCamelCase_ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](lowerCamelCase_ , lowerCamelCase_ ) return ans def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): __UpperCAmelCase = '''*''' for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: (__UpperCAmelCase) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(lowerCamelCase_ ): for j in range(lowerCamelCase_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: print(lowerCamelCase_ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(lowerCamelCase_ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 
or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , ): """simple docstring""" for itera in range(lowerCamelCase_ ): open_list[itera].remove_element(lowerCamelCase_ ) # print("s", s) # print("j", j) (__UpperCAmelCase) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(lowerCamelCase_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(lowerCamelCase_ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(lowerCamelCase_ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(lowerCamelCase_ , key(lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ) ) if neighbours not in close_list_inad: for var in range(1 , lowerCamelCase_ ): if key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) <= Wa * key( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ): open_list[j].put( lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : Any = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Optional[int] = make_common_ground() __lowerCAmelCase : Optional[int] = blocks_blk # hyper parameters __lowerCAmelCase : List[Any] = 1 __lowerCAmelCase : str = 1 __lowerCAmelCase : Tuple = 20 __lowerCAmelCase : int = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[int] = (0, 0) __lowerCAmelCase : Optional[int] = (n - 1, n - 1) __lowerCAmelCase : Union[str, Any] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(lowerCamelCase_ ): open_list.append(PriorityQueue() ) open_list[i].put(lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , lowerCamelCase_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) 
else: __UpperCAmelCase = open_list[i].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_inad.append(lowerCamelCase_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(lowerCamelCase_ ) expand_state( lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) close_list_anchor.append(lowerCamelCase_ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(lowerCamelCase_ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
702
'''simple docstring'''

from pathlib import Path

import fire


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
    """simple docstring"""
    __UpperCAmelCase = Path(UpperCamelCase__ )
    __UpperCAmelCase = Path(UpperCamelCase__ )
    dest_dir.mkdir(exist_ok=UpperCamelCase__ )
    for path in src_dir.iterdir():
        __UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
        __UpperCAmelCase = dest_dir.joinpath(path.name )
        print(UpperCamelCase__ )
        dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )


if __name__ == "__main__":
    fire.Fire(minify)
654
0
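The style context of this row is a small Fire CLI that truncates every file in a source directory to its first n lines. A dependency-free sketch of the same behavior follows (it drops the original's progress print); the names are illustrative.

from pathlib import Path


def minify(src_dir, dest_dir, n):
    """Copy every file in src_dir to dest_dir, truncated to its first n lines."""
    dest = Path(dest_dir)
    dest.mkdir(exist_ok=True)
    for path in Path(src_dir).iterdir():
        lines = [line.rstrip() for line in path.open().readlines()][:n]
        (dest / path.name).write_text("\n".join(lines))


# The original is driven by fire, so the equivalent CLI call would be:
#   python minify.py <src_dir> <dest_dir> <n>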
'''simple docstring'''

import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
    a_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def snake_case__ ( self : Dict , __a : int , __a : Any , __a : int ) -> str:
        __UpperCAmelCase = hf_hub_download(
            repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        __UpperCAmelCase = VideoClassificationPipeline(model=_lowercase , image_processor=_lowercase , top_k=2 )
        __UpperCAmelCase = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    def snake_case__ ( self : Tuple , __a : Union[str, Any] , __a : List[Any] ) -> List[str]:
        for example in examples:
            __UpperCAmelCase = video_classifier(_lowercase )
            self.assertEqual(
                _lowercase ,
                [
                    {'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
                    {'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
                ] , )

    @require_torch
    def snake_case__ ( self : str ) -> Tuple:
        __UpperCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        __UpperCAmelCase = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 1_0} , crop_size={'''height''': 1_0, '''width''': 1_0} )
        __UpperCAmelCase = pipeline(
            '''video-classification''' , model=_lowercase , feature_extractor=_lowercase , frame_sampling_rate=4 )
        __UpperCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
        __UpperCAmelCase = video_classifier(_lowercase , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase , decimals=4 ) ,
            [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}] , )

        __UpperCAmelCase = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(_lowercase , decimals=4 ) ,
            [
                [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
            ] , )

    @require_tf
    def snake_case__ ( self : Dict ) -> Optional[Any]:
        pass
703
'''simple docstring'''


def lowerCAmelCase ( UpperCamelCase__ : int ):
    """simple docstring"""
    if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
        __UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(UpperCamelCase__ )
    if number < 1:
        __UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(UpperCamelCase__ )
    __UpperCAmelCase = 1
    for i in range(1 , UpperCamelCase__ ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
654
0
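The style context of this row computes Catalan numbers with the recurrence C(i) = C(i-1) * (4i - 2) / (i + 1), which is exact under integer division at every step. A readable sketch with the recurrence spelled out; the function name is ours.

def catalan(number):
    """Iterative Catalan computation: C(i) = C(i-1) * (4i - 2) // (i + 1).

    The division is exact at every step, so integer floor division is safe.
    catalan(n) is the n-th term of 1, 1, 2, 5, 14, ... (1-indexed).
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current


# catalan(1) == 1, catalan(2) == 1, catalan(5) == 14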
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger __lowerCAmelCase : List[str] = "<<<<<<< This should probably be modified because it mentions: " __lowerCAmelCase : List[str] = "=======\n>>>>>>>\n" __lowerCAmelCase : Optional[int] = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] __lowerCAmelCase : Optional[Any] = [ # (pattern, replacement) # Order is important here for some replacements (r"tfds\.core", r"datasets"), (r"tf\.io\.gfile\.GFile", r"open"), (r"tf\.([\w\d]+)", r"datasets.Value('\1')"), (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"), (r"tfds\.features\.Text\(", r"datasets.Value('string'),"), (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("), (r"tfds\.features\.FeaturesDict\(", r"dict("), (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (r"tfds\.", r"datasets."), (r"dl_manager\.manual_dir", r"self.config.data_dir"), (r"self\.builder_config", r"self.config"), ] def lowerCAmelCase ( UpperCamelCase__ : str ): """simple docstring""" return ConvertCommand(args.tfds_path , args.datasets_directory ) class A ( snake_case__ ): @staticmethod def snake_case__ ( __a : Any ) -> List[str]: __UpperCAmelCase = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=lowercase_ , required=lowercase_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=lowercase_ , required=lowercase_ , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=lowercase_ ) def __init__( self : int , __a : List[str] , __a : List[Any] , *__a : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase = get_logger('''datasets-cli/converting''' ) __UpperCAmelCase = tfds_path __UpperCAmelCase = datasets_directory def snake_case__ ( self : int ) -> str: if os.path.isdir(self._tfds_path ): __UpperCAmelCase = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): __UpperCAmelCase = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. 
Please check path.''' ) __UpperCAmelCase = os.path.abspath(self._datasets_directory ) self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = {} if os.path.isdir(self._tfds_path ): __UpperCAmelCase = os.listdir(lowercase_ ) else: __UpperCAmelCase = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f"""Looking at file {f_name}""" ) __UpperCAmelCase = os.path.join(lowercase_ , lowercase_ ) __UpperCAmelCase = os.path.join(lowercase_ , lowercase_ ) if not os.path.isfile(lowercase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(lowercase_ , encoding='''utf-8''' ) as f: __UpperCAmelCase = f.readlines() __UpperCAmelCase = [] __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = [] for line in lines: __UpperCAmelCase = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __UpperCAmelCase = "import datasets\n" elif "import tensorflow" in out_line: # order is important here __UpperCAmelCase = "" continue elif "from absl import logging" in out_line: __UpperCAmelCase = "from datasets import logging\n" elif "getLogger" in out_line: __UpperCAmelCase = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): __UpperCAmelCase = True __UpperCAmelCase = list(filter(lambda __a : e in out_line , lowercase_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase_ ) + '''\n''' ) out_lines.append(lowercase_ ) out_lines.append(lowercase_ ) continue else: for pattern, replacement in TO_CONVERT: __UpperCAmelCase = re.sub(lowercase_ , lowercase_ , lowercase_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __UpperCAmelCase = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , lowercase_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) __UpperCAmelCase = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __UpperCAmelCase = True out_lines.append(lowercase_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __UpperCAmelCase = f_name.replace('''.py''' , '''''' ) __UpperCAmelCase = os.path.join(lowercase_ , lowercase_ ) __UpperCAmelCase = os.path.join(lowercase_ , lowercase_ ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) self._logger.info(f"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowercase_ ) if needs_manual_update: with_manual_update.append(lowercase_ ) with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as f: f.writelines(lowercase_ ) self._logger.info(f"""Converted in {output_file}""" ) for utils_file in utils_files: try: __UpperCAmelCase = os.path.basename(lowercase_ ) __UpperCAmelCase = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(f"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(lowercase_ , lowercase_ ) except KeyError: self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f"""You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.""" )
704
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase ( ): """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class A ( nn.Module ): def __init__( self : Optional[Any] ) -> int: super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A ( unittest.TestCase ): def snake_case__ ( self : Optional[int] ) -> Any: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : Union[str, Any] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Optional[int] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def snake_case__ ( self : Any ) -> int: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__a : Optional[int] ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : Any ) -> List[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Dict ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : List[Any] ) -> List[str]: @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(1_2_8 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def snake_case__ ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Tuple ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def snake_case__ ( self : Any ) -> List[Any]: __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) __UpperCAmelCase = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
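# Editor's sketch: a simplified stand-in for find_executable_batch_size,
# written from the behaviour the tests above exercise (halve the batch size on
# "out of memory" until the wrapped function succeeds, error out at zero).
# This is an assumption-labelled illustration, not Accelerate's implementation.
import functools


def naive_find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(
            naive_find_executable_batch_size, starting_batch_size=starting_batch_size
        )

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        batch_size = starting_batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                # The decorated function receives the batch size as its first argument.
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "out of memory" in str(e):
                    batch_size //= 2  # halve and retry
                else:
                    raise

    return wrapper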
654
0
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of each file in ``src_dir`` to ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
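# Editor's usage note (file name and paths are illustrative): fire.Fire(minify)
# exposes the function's parameters as CLI arguments, so the script is run as
#   python minify.py ./data ./data_mini 100
# which writes the first 100 lines of every file under ./data to ./data_mini.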
705
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the numerator of the p-th Newton term."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # build the forward difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
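# Editor's note: main() evaluates Newton's forward-difference formula
#   f(x0 + u*h) ≈ y0 + u*Δy0 + u(u - 1)/2! * Δ²y0 + ...,  u = (value - x[0]) / (x[1] - x[0])
# Worked check (editor-chosen data) for f(x) = x² sampled at x = 0, 1, 2, 3:
#   y column: 0 1 4 9   ->   Δ: 1 3 5   ->   Δ²: 2 2   ->   Δ³: 0
# Interpolating at x = 1.5 gives u = 1.5 and
#   f(1.5) ≈ 0 + 1.5*1 + (1.5*0.5 / 2!) * 2 = 2.25, which equals 1.5² exactly.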
654
0
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Upper-case the first character of ``sentence`` and leave the rest unchanged."""
    if not sentence:
        return ""
    # Translation table mapping each lowercase letter to its uppercase form.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
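# Editor's note: unlike str.capitalize(), this helper upper-cases only the
# first character and leaves the rest of the sentence untouched:
assert capitalize("hello World") == "Hello World"
assert "hello World".capitalize() == "Hello world"  # capitalize() lowercases the tail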
706
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) __UpperCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = dct.pop(UpperCamelCase__ ) __UpperCAmelCase = val def lowerCAmelCase ( UpperCamelCase__ : Dict ): """simple docstring""" if "handwritten" in checkpoint_url: __UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' __UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) return im @torch.no_grad() def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ ) __UpperCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __UpperCAmelCase = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = 4_0_9_6 __UpperCAmelCase = 2_4 __UpperCAmelCase = 1_6 __UpperCAmelCase = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = False __UpperCAmelCase = '''relu''' __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False # load HuggingFace model __UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ ) __UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ ) __UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() # load state_dict of original model, rename some keys __UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model'''] __UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __UpperCAmelCase = state_dict.pop(UpperCamelCase__ ) if key.startswith('''decoder''' ) and "output_projection" not in key: __UpperCAmelCase = val else: __UpperCAmelCase = val # load state dict model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image __UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) __UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' ) __UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values # verify logits __UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ) __UpperCAmelCase = outputs.logits __UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-2.64_37, -1.31_29, 
-2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
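# Editor's sketch: rename_key above is the standard pop-and-reinsert idiom for
# remapping checkpoint keys; a generic version (names below are illustrative,
# not part of the conversion script):
import torch


def rename_state_dict_keys(state_dict: dict, rename_pairs: list) -> dict:
    # Each pair moves a tensor from its old key to its new key in place.
    for src, dest in rename_pairs:
        state_dict[dest] = state_dict.pop(src)
    return state_dict


_sd = {"encoder.deit.norm.weight": torch.ones(3)}
rename_state_dict_keys(_sd, [("encoder.deit.norm.weight", "encoder.layernorm.weight")])
assert set(_sd) == {"encoder.layernorm.weight"}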
654
0
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
707
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]: return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy""" def snake_case__ ( self : Dict ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return image def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = '''bf16''' if fpaa else None __UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __a , subfolder='''unet''' , dtype=__a , revision=__a ) return model, params def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase , 
__UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__a , __a , atol=1e-2 )
654
0
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True when at least one value is given and every value is positive."""
    return len(values) > 0 and all(value > 0.0 for value in values)


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
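# Editor's note: every helper above is a rearrangement of Graham's law of
# effusion,
#   rate_1 / rate_2 = sqrt(M_2 / M_1)
# e.g. hydrogen (M ≈ 2.016 g/mol) effuses roughly sqrt(31.998 / 2.016) ≈ 3.98
# times faster than oxygen (molar masses are textbook values chosen by the
# editor for illustration).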
708
'''simple docstring''' import argparse import os import re import packaging.version __lowerCAmelCase : Optional[int] = "examples/" __lowerCAmelCase : Dict = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } __lowerCAmelCase : List[str] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } __lowerCAmelCase : int = "README.md" def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ): """simple docstring""" with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern] __UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ ) __UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ): """simple docstring""" for folder, directories, fnames in os.walk(UpperCamelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' ) def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not patch: update_version_in_examples(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = '''🤗 Transformers currently provides the following architectures''' __UpperCAmelCase = '''1. Want to contribute a new model?''' with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.readlines() # Find the start of the list. __UpperCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __UpperCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __UpperCAmelCase = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0] return packaging.version.parse(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Any=False ): """simple docstring""" __UpperCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __UpperCAmelCase = default_version.base_version elif patch: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = default_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = get_version() __UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __UpperCAmelCase = current_version.base_version # Check with the user we got that right. __UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = dev_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") __lowerCAmelCase : Tuple = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
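# Editor's sketch of the version-bump logic above, runnable in isolation
# (packaging is the only dependency; the version string is an invented example):
from packaging.version import parse

_v = parse("4.28.0.dev0")
assert _v.is_devrelease
assert f"{_v.major}.{_v.minor + 1}.0" == "4.29.0"           # next dev cycle
assert f"{_v.major}.{_v.minor}.{_v.micro + 1}" == "4.28.1"  # patch release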
654
0
'''simple docstring''' import mpmath # for roots of unity import numpy as np class SCREAMING_SNAKE_CASE_ : def __init__( self : str , __a : Optional[Any]=None , __a : int=None ) -> str: # Input as list __UpperCAmelCase = list(poly_a or [0] )[:] __UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() __UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() __UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 __UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform __UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product __UpperCAmelCase = self.__multiply() def snake_case__ ( self : Optional[int] , __a : Optional[int] ) -> Any: __UpperCAmelCase = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(UpperCamelCase__ ) <= 1: return dft[0] # __UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: __UpperCAmelCase = [[] for i in range(UpperCamelCase__ )] __UpperCAmelCase = self.root**next_ncol # First half of next step __UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCamelCase__ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step __UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCamelCase__ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update __UpperCAmelCase = new_dft __UpperCAmelCase = next_ncol // 2 return dft[0] def snake_case__ ( self : Optional[Any] ) -> Any: __UpperCAmelCase = self.__dft('''A''' ) __UpperCAmelCase = self.__dft('''B''' ) __UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT __UpperCAmelCase = 2 while next_ncol <= self.c_max_length: __UpperCAmelCase = [[] for i in range(UpperCamelCase__ )] __UpperCAmelCase = self.root ** (next_ncol // 2) __UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update __UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack __UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Tuple ) -> List[Any]: __UpperCAmelCase = 'A = ' + ' + '.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) ) __UpperCAmelCase = 'B = ' + ' + '.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) ) __UpperCAmelCase = 'A*B = ' + ' + '.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) ) return f"""{a}\n{b}\n{c}""" # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
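# Editor's cross-check (independent of the class above): an FFT-based
# polynomial product must agree with direct convolution of the coefficient
# lists, so numpy gives a quick oracle.
import numpy as np

_a = [1, 2, 3]  # 1 + 2x + 3x^2
_b = [4, 5]     # 4 + 5x
assert np.convolve(_a, _b).tolist() == [4, 13, 22, 15]  # 4 + 13x + 22x^2 + 15x^3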
709
def counting_sort(collection: list) -> list:
    """Sort a list of integers with a stable counting sort."""
    # if the collection is empty, return empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
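# Editor's note: counting sort runs in O(n + k) time and space with
# k = max - min + 1, so it only pays off when the value range is small.
assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]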
654
0
'''simple docstring''' import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Dict = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : int ): """simple docstring""" if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: __UpperCAmelCase = TOKENIZER_CLASSES else: __UpperCAmelCase = {tokenizer_name: getattr(UpperCamelCase__ , tokenizer_name + '''Fast''' )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: __UpperCAmelCase = TOKENIZER_CLASSES[tokenizer_name] __UpperCAmelCase = True if checkpoint_name is None: __UpperCAmelCase = list(tokenizer_class.max_model_input_sizes.keys() ) else: __UpperCAmelCase = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer __UpperCAmelCase = tokenizer_class.from_pretrained(UpperCamelCase__ , force_download=UpperCamelCase__ ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: __UpperCAmelCase , __UpperCAmelCase = checkpoint.split('''/''' ) __UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) elif add_prefix: __UpperCAmelCase = checkpoint __UpperCAmelCase = dump_path else: __UpperCAmelCase = None __UpperCAmelCase = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __UpperCAmelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __UpperCAmelCase = file_path.split(UpperCamelCase__ )[-1][0] if next_char == "/": __UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) __UpperCAmelCase = tokenizer.save_pretrained( UpperCamelCase__ , legacy_format=UpperCamelCase__ , filename_prefix=UpperCamelCase__ ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('''tokenizer.json''' ): os.remove(UpperCamelCase__ ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """ "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) __lowerCAmelCase : Tuple = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
710
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ): """simple docstring""" __UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' ) __UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
654
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A ( unittest.TestCase ): def __init__( self : str , __a : str , __a : int=1_3 , __a : Optional[Any]=3 , __a : List[str]=2_2_4 , __a : List[Any]=3_0 , __a : Optional[int]=4_0_0 , __a : List[Any]=True , __a : Dict=None , __a : Union[str, Any]=True , __a : Dict=[0.5, 0.5, 0.5] , __a : int=[0.5, 0.5, 0.5] , ) -> Tuple: __UpperCAmelCase = size if size is not None else {'''height''': 1_8, '''width''': 1_8} __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = num_channels __UpperCAmelCase = image_size __UpperCAmelCase = min_resolution __UpperCAmelCase = max_resolution __UpperCAmelCase = do_resize __UpperCAmelCase = size __UpperCAmelCase = do_normalize __UpperCAmelCase = image_mean __UpperCAmelCase = image_std def snake_case__ ( self : int ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A ( lowercase_ , unittest.TestCase ): a_ = ViTImageProcessor if is_vision_available() else None def snake_case__ ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase = EfficientFormerImageProcessorTester(self ) @property def snake_case__ ( self : List[str] ) -> Tuple: return self.image_proc_tester.prepare_image_processor_dict() def snake_case__ ( self : Optional[Any] ) -> int: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''size''' ) ) def snake_case__ ( self : Dict ) -> Union[str, Any]: pass def snake_case__ ( self : str ) -> Any: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input __UpperCAmelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input 
__UpperCAmelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def snake_case__ ( self : str ) -> List[Any]: __UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input __UpperCAmelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __UpperCAmelCase = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
711
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    completed = 0
    total_time = 0

    # While processes are not completed, every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process. The shortest process in ready_process, target_process,
    # is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
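# Editor's note: despite the remaining_time bookkeeping, the scheduler above is
# non-preemptive shortest-job-first: once the shortest arrived job is chosen it
# runs to completion. For TEST CASE 01 this yields waiting times [0, 5, 2, 10]
# (average 4.25) and turnaround times [2, 10, 5, 17].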
654
0
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class A ( __UpperCAmelCase ): a_ = '''vision-encoder-decoder''' a_ = True def __init__( self : int , **__a : Dict ) -> Tuple: super().__init__(**lowerCAmelCase_ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) __UpperCAmelCase = kwargs.pop('''encoder''' ) __UpperCAmelCase = encoder_config.pop('''model_type''' ) __UpperCAmelCase = kwargs.pop('''decoder''' ) __UpperCAmelCase = decoder_config.pop('''model_type''' ) __UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ ) __UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ ) __UpperCAmelCase = True @classmethod def snake_case__ ( cls : Dict , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> Optional[Any]: logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) __UpperCAmelCase = True __UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase_ ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = copy.deepcopy(self.__dict__ ) __UpperCAmelCase = self.encoder.to_dict() __UpperCAmelCase = self.decoder.to_dict() __UpperCAmelCase = self.__class__.model_type return output class A ( __UpperCAmelCase ): a_ = version.parse('''1.11''' ) @property def snake_case__ ( self : Any ) -> Union[str, Any]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def snake_case__ ( self : int ) -> Any: return 1e-4 @property def snake_case__ ( self : Dict ) -> Any: return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class A ( __UpperCAmelCase ): @property def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = OrderedDict() __UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __UpperCAmelCase = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def snake_case__ ( self : Dict , __a : "PreTrainedTokenizerBase" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , ) -> List[Any]: import torch __UpperCAmelCase = OrderedDict() __UpperCAmelCase = super().generate_dummy_inputs( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) __UpperCAmelCase , __UpperCAmelCase = dummy_input['''input_ids'''].shape __UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) __UpperCAmelCase = dummy_input.pop('''input_ids''' ) __UpperCAmelCase = dummy_input.pop('''attention_mask''' ) __UpperCAmelCase = torch.zeros(lowerCAmelCase_ ) return common_inputs class A ( __UpperCAmelCase ): @property def snake_case__ ( self : str ) -> List[str]: pass def snake_case__ ( self : Any , __a : PretrainedConfig ) -> Tuple: 
return VisionEncoderDecoderEncoderOnnxConfig(lowerCAmelCase_ ) def snake_case__ ( self : Any , __a : PretrainedConfig , __a : PretrainedConfig , __a : str = "default" ) -> List[Any]: __UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(lowerCAmelCase_ , lowerCAmelCase_ )
712
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]: requires_backends(cls , 
['''torch'''] )

    @classmethod
    def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
        requires_backends(cls , ['''torch'''] )


class A ( metaclass=UpperCAmelCase ):
    a_ = ['''torch''']

    def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
        requires_backends(self , ['''torch'''] )

    @classmethod
    def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
        requires_backends(cls , ['''torch'''] )

    @classmethod
    def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
        requires_backends(cls , ['''torch'''] )


def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
    """simple docstring"""
    requires_backends(UpperCamelCase__ , ['''torch'''] )
654
0
'''simple docstring'''


def lowerCAmelCase ( moles : int , volume : float , nfactor : float ):
    """simple docstring"""
    return round(float(moles / volume ) * nfactor )


def lowerCAmelCase ( moles : float , volume : float , temperature : float ):
    """simple docstring"""
    return round(float((moles * 0.08_21 * temperature) / (volume) ) )


def lowerCAmelCase ( moles : float , pressure : float , temperature : float ):
    """simple docstring"""
    return round(float((moles * 0.08_21 * temperature) / (pressure) ) )


def lowerCAmelCase ( moles : float , pressure : float , volume : float ):
    """simple docstring"""
    return round(float((pressure * volume) / (0.08_21 * moles) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
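A quick sanity check of the snippet above, as a minimal sketch: all four functions are bound to one name, so only the last definition (temperature from moles, pressure and volume, with R = 0.0821 L·atm/(mol·K)) remains callable at module level.

# Illustrative only: 1 mol of gas at 1 atm in 22.4 L is roughly 273 K (about STP).
print(lowerCAmelCase(1.0, 1.0, 22.4))  # round((1.0 * 22.4) / (0.0821 * 1.0)) == 273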
713
'''simple docstring'''
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
654
0
import argparse
import collections

import torch
from flax import traverse_util
from tax import checkpoints

from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """simple docstring"""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """simple docstring"""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]


def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''])
    old = {'''/'''.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, '''encoder''', '''pre_attention_layer_norm''')
        k, o, q, v = tax_attention_lookup(old, i, '''encoder''', '''attention''')
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, '''encoder''', '''pre_mlp_layer_norm''')
        wi, wo = tax_mlp_lookup(old, i, '''encoder''', split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        '''encoder/relpos_bias/rel_embedding'''
    ].T
    new["encoder.final_layer_norm.weight"] = old['''encoder/encoder_norm/scale''']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_self_attention_layer_norm''')
            k, o, q, v = tax_attention_lookup(old, i, '''decoder''', '''self_attention''')
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_cross_attention_layer_norm''')
            k, o, q, v = tax_attention_lookup(old, i, '''decoder''', '''encoder_decoder_attention''')
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, '''decoder''', '''pre_mlp_layer_norm''')
            wi, wo = tax_mlp_lookup(old, i, '''decoder''', split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old['''decoder/decoder_norm/scale''']
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            '''decoder/relpos_bias/rel_embedding'''
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old['''decoder/logits_dense/kernel'''].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict['''shared.weight''']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict['''shared.weight''']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''')
            state_dict["lm_head.weight"] = state_dict['''shared.weight''']

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """simple docstring"""
    config = TaConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
714
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
'''simple docstring'''
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class A ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__( image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
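A minimal usage sketch of the processor above; the tokenizer checkpoint is a placeholder and the class keeps its obfuscated name `A` from this snippet:

# Hypothetical wiring: any BLIP-style image processor plus any tokenizer works here.
from transformers import AutoTokenizer, BlipImageProcessor

image_processor = BlipImageProcessor()
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder checkpoint
processor = A(image_processor, tokenizer)
inputs = processor(images=None, text="a photo of a cat")  # text-only path returns a BatchEncoding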
715
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    labelaid = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length'''
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='''max_length''',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.intaa for k in input_names}, tf.intaa),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, labelaid


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''})
    train_file: str = field(default=None, metadata={'''help''': '''The path of the training file'''})
    dev_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the development file'''})
    test_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the test file'''})
    max_seq_length: int = field(
        default=1_2_8,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )


def main():
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.'''
        )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        level=logging.INFO,
    )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        f"""16-bits training: {training_args.fpaa}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, labelaid = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(labelaid),
        labelaid=labelaid,
        idalabel={id: label for label, id in labelaid.items()},
        finetuning_task='''text-classification''',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('''.bin''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')

        with open(output_eval_file, '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key, value in result.items():
                logger.info(f"""  {key} = {value}""")
                writer.write(f"""{key} = {value}\n""")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
654
0
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """simple docstring"""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('''train''', help='''CLI tool to train a model on a task.''')
        train_parser.add_argument(
            '''--train_data''',
            type=str,
            required=True,
            help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''',
        )
        train_parser.add_argument(
            '''--column_label''', type=int, default=0, help='''Column of the dataset csv file with example labels.'''
        )
        train_parser.add_argument(
            '''--column_text''', type=int, default=1, help='''Column of the dataset csv file with example texts.'''
        )
        train_parser.add_argument(
            '''--column_id''', type=int, default=2, help='''Column of the dataset csv file with example ids.'''
        )
        train_parser.add_argument(
            '''--skip_first_row''', action='''store_true''', help='''Skip the first row of the csv file (headers).'''
        )
        train_parser.add_argument('''--validation_data''', type=str, default='''''', help='''path to validation dataset.''')
        train_parser.add_argument(
            '''--validation_split''',
            type=float,
            default=0.1,
            help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''',
        )
        train_parser.add_argument('''--output''', type=str, default='''./''', help='''path to saved the trained model.''')
        train_parser.add_argument(
            '''--task''', type=str, default='''text_classification''', help='''Task to train the model on.'''
        )
        train_parser.add_argument(
            '''--model''', type=str, default='''bert-base-uncased''', help='''Model\'s name or path to stored model.'''
        )
        train_parser.add_argument('''--train_batch_size''', type=int, default=3_2, help='''Batch size for training.''')
        train_parser.add_argument('''--valid_batch_size''', type=int, default=6_4, help='''Batch size for validation.''')
        train_parser.add_argument('''--learning_rate''', type=float, default=3e-5, help='''Learning rate.''')
        train_parser.add_argument('''--adam_epsilon''', type=float, default=1e-08, help='''Epsilon for Adam optimizer.''')
        train_parser.set_defaults(func=train_command_factory)

    def __init__( self : Tuple , args : Namespace ) -> int:
        self.logger = logging.get_logger('''transformers-cli/training''')

        self.framework = '''tf''' if is_tf_available() else '''torch'''

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"""Loading dataset from {args.train_data}""")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch( self ):
        raise NotImplementedError

    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
716
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__( self , parent ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=3_7)

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm( self ):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
654
0
'''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """simple docstring"""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """simple docstring"""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
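The same classifier with an explicit neighbour count, as a small follow-up example (the query point is arbitrary):

# Using k=3 instead of the default k=5.
print(classifier(X_train, y_train, classes, [6.0, 3.0, 4.8, 1.8], k=3))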
717
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
import functools


def lowerCAmelCase ( worda : str , wordb : str ):
    """simple docstring"""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
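Two quick checks of the recursive edit distance above, using standard Levenshtein values:

print(lowerCAmelCase("kitten", "sitting"))  # 3
print(lowerCAmelCase("intention", "execution"))  # 5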
718
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


class A ( PretrainedConfig ):
    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=5_0_3_5_8,
        hidden_size=1_0_2_4,
        num_hidden_layers=2_4,
        num_attention_heads=1_6,
        intermediate_size=4_0_9_6,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
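Constructing the configuration above with its defaults, as a minimal sketch (the class keeps the snippet's obfuscated name `A`):

config = A()
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # bert-generation 1024 24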
654
0
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
719
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')

    return velocity / c


def gamma(velocity: float) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(F"""ct' = {four_vector[0]}""")
    print(F"""x' = {four_vector[1]}""")
    print(F"""y' = {four_vector[2]}""")
    print(F"""z' = {four_vector[3]}""")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"""\n{numerical_vector}""")
654
0
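A worked numeric check of the beta/gamma formulas in the record above, evaluated at half light speed (standalone sketch, plain math only):

from math import sqrt

c = 299_792_458
v = 0.5 * c
beta = v / c                   # 0.5
gamma = 1 / sqrt(1 - beta**2)  # ~1.1547, the time-dilation factor at v = c/2
print(f"beta={beta}, gamma={gamma:.4f}")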
'''simple docstring'''
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """simple docstring"""
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """simple docstring"""
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """simple docstring"""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
720
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : Any = tuple[int, int] class A : def __init__( self : Optional[int] ) -> int: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : Optional[Any] ) -> List[Any]: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Dict ) -> Optional[int]: return len(self.elements ) == 0 def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__a ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : int , __a : Any ) -> int: if item in self.set: self.set.remove(__a ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : List[str] ) -> Dict: return self.elements[0][1] def snake_case__ ( self : Any ) -> List[str]: ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__a ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # euclidean distance __UpperCAmelCase = np.array(UpperCamelCase__ ) __UpperCAmelCase = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): __UpperCAmelCase = '''*''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: 
print(UpperCamelCase__ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ): """simple docstring""" for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Dict = make_common_ground() __lowerCAmelCase : int = blocks_blk # hyper parameters __lowerCAmelCase : Dict = 1 __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Union[str, Any] = 20 __lowerCAmelCase : Any = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[Any] = (0, 0) __lowerCAmelCase : Any = (n - 1, n - 1) __lowerCAmelCase : Optional[int] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
654
0
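A hypothetical end-to-end use of the CPU/GPU measurement helpers in the record above. The names start_measure, end_measure, and log_measures follow the helper definitions as written there (the original file left them unnamed, so they are an assumption); it also assumes torch and psutil are installed, and the GPU loops simply do nothing on CPU-only machines:

import torch

start = start_measure()            # snapshot time / CPU RSS / GPU counters
x = torch.randn(1_000, 1_000)
y = x @ x                          # workload being profiled
deltas = end_measure(start)        # deltas in seconds and MiB
log_measures(deltas, "matmul benchmark")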
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __lowerCAmelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __lowerCAmelCase : Optional[int] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( 
"zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): __UpperCAmelCase = None if _re_tf_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = tf_models __UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = flax_models __UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = pt_models __UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCAmelCase = True break # Try again after removing the last word in the name __UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] ) __UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCAmelCase = list(UpperCamelCase__ ) all_models.sort() __UpperCAmelCase = {'''model_type''': all_models} __UpperCAmelCase = [pt_models[t] for t in all_models] __UpperCAmelCase = [tf_models[t] for t in all_models] __UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCAmelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCAmelCase = '''AutoTokenizer''' __UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] __UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names __UpperCAmelCase = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" __UpperCAmelCase = get_frameworks_table() __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) __UpperCAmelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ ) __UpperCAmelCase = Dataset.from_json(UpperCamelCase__ ) __UpperCAmelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(UpperCamelCase__ ) ) } __UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCAmelCase = sorted(table.keys() ) __UpperCAmelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __UpperCAmelCase = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __UpperCAmelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS __UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: __UpperCAmelCase = pipeline_tasks[key]['''pt'''] if isinstance(UpperCamelCase__ , (list, tuple) ): __UpperCAmelCase = model[0] __UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ''', '''.join(UpperCamelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __lowerCAmelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
654
0
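A minimal standalone sketch of the optional-backend pattern used by the lazy init module above: register the always-available symbols first, then add heavy backends only when their dependency is importable. FooConfig and FooModel are hypothetical placeholder names:

import importlib.util

_import_structure = {"configuration_foo": ["FooConfig"]}
if importlib.util.find_spec("torch") is not None:
    # torch is importable, so the torch-backed symbols may be registered.
    _import_structure["modeling_foo"] = ["FooModel"]
print(_import_structure)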
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float):
    """simple docstring"""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float):
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float):
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None):
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
700
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase : Optional[int] = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class A ( unittest.TestCase ): def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple: __UpperCAmelCase = None __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) __UpperCAmelCase = os.path.abspath('''examples''' ) for item in os.listdir(__a ): if item not in EXCLUDE_EXAMPLES: __UpperCAmelCase = os.path.join(__a , __a ) if os.path.isfile(__a ) and ".py" in item_path: with self.subTest( tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ): __UpperCAmelCase = compare_against_test( os.path.join(__a , __a ) , __a , __a , __a ) __UpperCAmelCase = '''\n'''.join(__a ) if special_strings is not None: for string in special_strings: __UpperCAmelCase = diff.replace(__a , '''''' ) self.assertEqual(__a , '''''' ) def snake_case__ ( self : Optional[Any] ) -> str: self.one_complete_example('''complete_nlp_example.py''' , __a ) self.one_complete_example('''complete_nlp_example.py''' , __a ) def snake_case__ ( self : List[str] ) -> Tuple: __UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) __UpperCAmelCase = [ ''' ''' * 1_6 + '''{\n\n''', ''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 2_0 + '''"epoch": epoch,\n\n''', ''' ''' * 1_6 + '''},\n\n''', ''' ''' * 1_6 + '''step=epoch,\n''', ''' ''' * 1_2, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class A ( UpperCAmelCase ): a_ = False @classmethod def snake_case__ ( cls : Tuple ) -> str: super().setUpClass() __UpperCAmelCase = tempfile.mkdtemp() __UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) __UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Dict ) -> int: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 
--output_dir {self.tmpdir} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) if torch.cuda.is_available(): __UpperCAmelCase = torch.cuda.device_count() else: __UpperCAmelCase = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) else: self.assertIn('''epoch 0:''' , __a ) self.assertIn('''epoch 1:''' , __a ) @slow def snake_case__ ( self : Any ) -> Optional[Any]: __UpperCAmelCase = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a ) __UpperCAmelCase = re.findall('''({.+})''' , __a ) __UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1] __UpperCAmelCase = ast.literal_eval(__a ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def snake_case__ ( self : Dict ) -> int: __UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdir: __UpperCAmelCase = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) ) def snake_case__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def snake_case__ ( self : Tuple ) -> Optional[Any]: __UpperCAmelCase = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
654
0
import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : bool = True , UpperCamelCase__ : float = math.inf , UpperCamelCase__ : float = -math.inf , UpperCamelCase__ : float = math.inf , UpperCamelCase__ : float = -math.inf , UpperCamelCase__ : bool = False , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 0.01 , UpperCamelCase__ : float = 1 , ): """simple docstring""" __UpperCAmelCase = False __UpperCAmelCase = search_prob __UpperCAmelCase = start_temperate __UpperCAmelCase = [] __UpperCAmelCase = 0 __UpperCAmelCase = None while not search_end: __UpperCAmelCase = current_state.score() if best_state is None or current_score > best_state.score(): __UpperCAmelCase = current_state scores.append(UpperCamelCase__ ) iterations += 1 __UpperCAmelCase = None __UpperCAmelCase = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __UpperCAmelCase = random.randint(0 , len(UpperCamelCase__ ) - 1 ) # picking a random neighbor __UpperCAmelCase = neighbors.pop(UpperCamelCase__ ) __UpperCAmelCase = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __UpperCAmelCase = change * -1 # in case we are finding minimum if change > 0: # improves the solution __UpperCAmelCase = picked_neighbor else: __UpperCAmelCase = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __UpperCAmelCase = picked_neighbor __UpperCAmelCase = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __UpperCAmelCase = True else: __UpperCAmelCase = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(UpperCamelCase__ ) , UpperCamelCase__ ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ): """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) __lowerCAmelCase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) __lowerCAmelCase : List[str] = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) __lowerCAmelCase : List[str] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) __lowerCAmelCase : int = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 " F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" return (3 * x**2) - (6 * y) __lowerCAmelCase : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __lowerCAmelCase : Optional[Any] = simulated_annealing(prob, 
find_max=False, visualization=True) print( "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: " F"""{local_min.score()}""" ) __lowerCAmelCase : Union[str, Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) __lowerCAmelCase : List[str] = simulated_annealing(prob, find_max=True, visualization=True) print( "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: " F"""{local_min.score()}""" )
701
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __lowerCAmelCase : Any = "" __lowerCAmelCase : int = "" __lowerCAmelCase : Union[str, Any] = "" __lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ ) print('''Processing...''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for index, image in enumerate(UpperCamelCase__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCAmelCase = random_chars(3_2 ) __UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] ) print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" ) __UpperCAmelCase = [] for anno in new_annos[index]: __UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(UpperCamelCase__ ) with open(f"""/{file_root}.txt""" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ): """simple docstring""" __UpperCAmelCase = [] __UpperCAmelCase = [] for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ): __UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(UpperCamelCase__ ) as in_file: __UpperCAmelCase = in_file.readlines() __UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" ) __UpperCAmelCase = [] for obj_list in obj_lists: __UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(UpperCamelCase__ ) labels.append(UpperCamelCase__ ) return img_paths, labels def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ): """simple docstring""" __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = [] for idx in range(len(UpperCamelCase__ ) ): __UpperCAmelCase = [] __UpperCAmelCase = img_list[idx] path_list.append(UpperCamelCase__ ) __UpperCAmelCase = anno_list[idx] __UpperCAmelCase = cva.imread(UpperCamelCase__ ) if flip_type == 1: __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ ) for bbox in img_annos: __UpperCAmelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ ) for bbox in img_annos: __UpperCAmelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(UpperCamelCase__ ) new_imgs_list.append(UpperCamelCase__ ) return new_imgs_list, new_annos_lists, path_list def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" __UpperCAmelCase = ascii_lowercase + digits return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) ) if __name__ == "__main__": main() print("DONE ✅")
654
0
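The annealing loop in the record above accepts a worsening move with probability e^(change/T), so exploration shrinks as the temperature cools; a compact standalone sketch of that Metropolis-style acceptance rule:

import math
import random

def accept(change: float, temperature: float) -> bool:
    # Improvements are always taken; worsening moves are taken with
    # probability exp(change / T), where change <= 0.
    if change > 0:
        return True
    return random.random() < math.exp(change / temperature)

print(accept(-1.0, 10.0), accept(-1.0, 0.01))  # likely True, almost surely False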
'''simple docstring'''


def is_power_of_two(number: int) -> bool:
    """simple docstring"""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
'''simple docstring'''
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """simple docstring"""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
654
0
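A spot check of the bit trick in the power-of-two record above: n & (n - 1) clears the lowest set bit, so the result is zero exactly when n has a single set bit.

for n in [1, 2, 3, 8, 12, 64]:
    print(n, n & (n - 1) == 0)
# 1 True, 2 True, 3 False, 8 True, 12 False, 64 True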
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
703
'''simple docstring'''


def catalan(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
654
0
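The loop in the Catalan-number sample above applies the recurrence C_n = C_{n-1} * (4n - 2) / (n + 1); a cross-check against the closed form C_n = C(2n, n) / (n + 1), written as a standalone sketch:

from math import comb

def catalan_closed_form(n: int) -> int:
    # Binomial closed form of the nth Catalan number.
    return comb(2 * n, n) // (n + 1)

print([catalan_closed_form(n) for n in range(6)])  # [1, 1, 2, 5, 14, 42]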
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ): """simple docstring""" __UpperCAmelCase = {} if train_file is not None: __UpperCAmelCase = [train_file] if eval_file is not None: __UpperCAmelCase = [eval_file] if test_file is not None: __UpperCAmelCase = [test_file] __UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ ) __UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() ) __UpperCAmelCase = features_name.pop(UpperCamelCase__ ) __UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) ) __UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )} __UpperCAmelCase = tokenizer.model_input_names __UpperCAmelCase = {} if len(UpperCamelCase__ ) == 1: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , ) elif len(UpperCamelCase__ ) == 2: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if 
datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCAmelCase : List[Any] = logging.getLogger(__name__) @dataclass class A : a_ = field(metadata={'''help''': '''Which column contains the label'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} ) a_ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : a_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict: __UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __UpperCAmelCase = TFTrainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __UpperCAmelCase = trainer.evaluate() __UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(UpperCamelCase__ ) return results if __name__ == "__main__": main()
704
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowerCAmelCase ( ): """simple docstring""" raise RuntimeError('''CUDA out of memory.''' ) class A ( nn.Module ): def __init__( self : Optional[Any] ) -> int: super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A ( unittest.TestCase ): def snake_case__ ( self : Optional[int] ) -> Any: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : Union[str, Any] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) def snake_case__ ( self : str ) -> int: __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Optional[int] ): nonlocal batch_sizes batch_sizes.append(__a ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def snake_case__ ( self : Any ) -> int: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(__a : Optional[int] ): pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : Any ) -> List[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Dict ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def snake_case__ ( self : List[Any] ) -> List[str]: @find_executable_batch_size(starting_batch_size=1_2_8 ) def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(__a ) as cm: mock_training_loop_function(1_2_8 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def snake_case__ ( self : Tuple ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=1_6 ) def mock_training_loop_function(__a : Tuple ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(__a ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def snake_case__ ( self : Any ) -> List[Any]: __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , __a ) __UpperCAmelCase = release_memory(__a ) self.assertEqual(torch.cuda.memory_allocated() , __a )
654
0
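The test file above exercises accelerate's find_executable_batch_size decorator, which retries the decorated function at half the batch size whenever it sees a CUDA out-of-memory style error; a minimal usage sketch, with the fake-OOM message mirroring the one used in those tests:

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 16:
        # Recognized as an OOM by accelerate, triggering a halving retry.
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # attempts 128, 64, 32, then succeeds at 16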
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( UpperCAmelCase ): a_ = (DEISMultistepScheduler,) a_ = (('''num_inference_steps''', 2_5),) def snake_case__ ( self : int , **__a : Union[str, Any] ) -> int: __UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, } config.update(**__a ) return config def snake_case__ ( self : Optional[Any] , __a : Dict=0 , **__a : int ) -> List[Any]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __a ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config(**__a ) __UpperCAmelCase = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __UpperCAmelCase = scheduler_class.from_pretrained(__a ) new_scheduler.set_timesteps(__a ) # copy over dummy past residuals __UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __UpperCAmelCase , __UpperCAmelCase = sample, sample for t in range(__a , time_step + scheduler.config.solver_order + 1 ): __UpperCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __UpperCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def snake_case__ ( self : Union[str, Any] ) -> Dict: pass def snake_case__ ( self : str , __a : Dict=0 , **__a : List[Any] ) -> str: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __a ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample __UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __UpperCAmelCase = scheduler_class.from_pretrained(__a ) # copy over dummy past residuals new_scheduler.set_timesteps(__a ) # copy over dummy past residual (must be after setting timesteps) __UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __UpperCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __UpperCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[Any] , __a : List[str]=None , **__a : List[str] ) -> Optional[int]: if scheduler is None: __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(**__a ) __UpperCAmelCase = scheduler_class(**__a ) __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(**__a ) __UpperCAmelCase = 
scheduler_class(**__a ) __UpperCAmelCase = 1_0 __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(__a , __a ) __UpperCAmelCase = scheduler.step(__a , __a , __a ).prev_sample return sample def snake_case__ ( self : Dict ) -> Optional[Any]: __UpperCAmelCase = dict(self.forward_default_kwargs ) __UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __a ) for scheduler_class in self.scheduler_classes: __UpperCAmelCase = self.get_scheduler_config() __UpperCAmelCase = scheduler_class(**__a ) __UpperCAmelCase = self.dummy_sample __UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__a , '''set_timesteps''' ): scheduler.set_timesteps(__a ) elif num_inference_steps is not None and not hasattr(__a , '''set_timesteps''' ): __UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] __UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] __UpperCAmelCase = scheduler.timesteps[5] __UpperCAmelCase = scheduler.timesteps[6] __UpperCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample __UpperCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case__ ( self : Union[str, Any] ) -> str: # make sure that iterating over schedulers with same config names gives same results # for defaults __UpperCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() ) __UpperCAmelCase = self.full_loop(scheduler=__a ) __UpperCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 __UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) __UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) __UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) __UpperCAmelCase = self.full_loop(scheduler=__a ) __UpperCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def snake_case__ ( self : List[Any] ) -> Optional[int]: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__a ) def snake_case__ ( self : Dict ) -> List[str]: self.check_over_configs(thresholding=__a ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type='''deis''' , solver_order=__a , solver_type=__a , ) def snake_case__ ( self : List[str] ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def snake_case__ ( self : Optional[Any] ) -> int: for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) __UpperCAmelCase = self.full_loop( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) assert not torch.isnan(__a ).any(), "Samples have nan numbers" def snake_case__ ( self : List[str] ) -> Dict: self.check_over_configs(lower_order_final=__a ) 
self.check_over_configs(lower_order_final=__a ) def snake_case__ ( self : str ) -> Tuple: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__a , time_step=0 ) def snake_case__ ( self : int ) -> List[Any]: __UpperCAmelCase = self.full_loop() __UpperCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def snake_case__ ( self : Tuple ) -> int: __UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) __UpperCAmelCase = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def snake_case__ ( self : int ) -> List[Any]: __UpperCAmelCase = self.scheduler_classes[0] __UpperCAmelCase = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 ) __UpperCAmelCase = scheduler_class(**__a ) __UpperCAmelCase = 1_0 __UpperCAmelCase = self.dummy_model() __UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __UpperCAmelCase = model(__a , __a ) __UpperCAmelCase = scheduler.step(__a , __a , __a ).prev_sample assert sample.dtype == torch.floataa
705
'''simple docstring''' from __future__ import annotations import math def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = u for i in range(1 , UpperCamelCase__ ): __UpperCAmelCase = temp * (u - i) return temp def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = int(input('''enter the numbers of values: ''' ) ) __UpperCAmelCase = [] for _ in range(UpperCamelCase__ ): y.append([] ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): y[i].append(UpperCamelCase__ ) __UpperCAmelCase = 0 print('''enter the values of parameters in a list: ''' ) __UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) ) print('''enter the values of corresponding parameters: ''' ) for i in range(UpperCamelCase__ ): __UpperCAmelCase = float(input() ) __UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) ) __UpperCAmelCase = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , UpperCamelCase__ ): for j in range(n - i ): __UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1] __UpperCAmelCase = y[0][0] for i in range(1 , UpperCamelCase__ ): summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ ) print(f"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
654
0
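A runnable restatement of the Newton forward-difference interpolation above, with a fixed sample table in place of the interactive prompts; the data points and the interpolation point below are arbitrary illustration values:

import math


def ucal(u, p):
    # u * (u - 1) * ... * (u - p + 1)
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


n = 4
x = [0.0, 1.0, 2.0, 3.0]  # equally spaced abscissae
y = [[0.0] * n for _ in range(n)]
for i, val in enumerate([1.0, 2.0, 1.0, 10.0]):
    y[i][0] = val  # sampled ordinates

value = 1.5  # point to interpolate at
u = (value - x[0]) / (x[1] - x[0])

# build the forward difference table column by column
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
print(f"the value at {value} is {summ}")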
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __lowerCAmelCase : Optional[int] = 500_000 __lowerCAmelCase : str = os.path.split(__file__) __lowerCAmelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def lowerCAmelCase ( UpperCamelCase__ : datasets.Dataset , **UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = dataset.map(**UpperCamelCase__ ) @get_duration def lowerCAmelCase ( UpperCamelCase__ : datasets.Dataset , **UpperCamelCase__ : Optional[Any] ): """simple docstring""" __UpperCAmelCase = dataset.filter(**UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) __UpperCAmelCase = generate_example_dataset( os.path.join(UpperCamelCase__ , '''dataset.arrow''' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ ) __UpperCAmelCase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase__ ) def tokenize(UpperCamelCase__ : Optional[Any] ): return tokenizer(examples['''text'''] ) __UpperCAmelCase = map(UpperCamelCase__ ) __UpperCAmelCase = map(UpperCamelCase__ , batched=UpperCamelCase__ ) __UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ ) with dataset.formatted_as(type='''numpy''' ): __UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ ) with dataset.formatted_as(type='''pandas''' ): __UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): __UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): __UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ ) __UpperCAmelCase = map(UpperCamelCase__ , function=UpperCamelCase__ , batched=UpperCamelCase__ ) __UpperCAmelCase = filter(UpperCamelCase__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(UpperCamelCase__ , '''wb''' ) as f: f.write(json.dumps(UpperCamelCase__ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
706
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) __UpperCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = dct.pop(UpperCamelCase__ ) __UpperCAmelCase = val def lowerCAmelCase ( UpperCamelCase__ : Dict ): """simple docstring""" if "handwritten" in checkpoint_url: __UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' __UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) return im @torch.no_grad() def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ): """simple docstring""" __UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ ) __UpperCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: __UpperCAmelCase = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = 4_0_9_6 __UpperCAmelCase = 2_4 __UpperCAmelCase = 1_6 __UpperCAmelCase = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCAmelCase = False __UpperCAmelCase = '''relu''' __UpperCAmelCase = 1_0_2_4 __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False # load HuggingFace model __UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ ) __UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ ) __UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() # load state_dict of original model, rename some keys __UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model'''] __UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): __UpperCAmelCase = state_dict.pop(UpperCamelCase__ ) if key.startswith('''decoder''' ) and "output_projection" not in key: __UpperCAmelCase = val else: __UpperCAmelCase = val # load state dict model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image __UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) __UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' ) __UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values # verify logits __UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) __UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ) __UpperCAmelCase = outputs.logits __UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-2.64_37, -1.31_29, 
-2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: __UpperCAmelCase = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected" Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase__ ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
654
0
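The benchmark script above imports `get_duration` from a local `utils` module that is not shown here; a plausible minimal stand-in, assuming the helper simply returns the wall-clock time of the wrapped call:

import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return wrapper


@get_duration
def busy_loop(n):
    # throwaway workload so there is something to time
    total = 0
    for i in range(n):
        total += i


print(f"busy_loop(1_000_000) took {busy_loop(1_000_000):.4f}s")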
'''simple docstring''' import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __lowerCAmelCase : str = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : tuple , UpperCamelCase__ : Path , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=False , ): """simple docstring""" output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , ) else: export( UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , ) @torch.no_grad() def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ): """simple docstring""" __UpperCAmelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __UpperCAmelCase = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: __UpperCAmelCase = '''cpu''' __UpperCAmelCase = Path(UpperCamelCase__ ) # VAE DECODER __UpperCAmelCase = AutoencoderKL.from_pretrained(model_path + '''/vae''' ) __UpperCAmelCase = vae_decoder.config.latent_channels # forward only through the decoder part __UpperCAmelCase = vae_decoder.decode onnx_export( UpperCamelCase__ , model_args=( torch.randn(1 , UpperCamelCase__ , 2_5 , 2_5 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=UpperCamelCase__ , ) del vae_decoder if __name__ == "__main__": __lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") __lowerCAmelCase : Optional[int] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
707
'''simple docstring''' import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A ( unittest.TestCase ): def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]: return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy""" def snake_case__ ( self : Dict ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return image def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = '''bf16''' if fpaa else None __UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __a , subfolder='''unet''' , dtype=__a , revision=__a ) return model, params def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]: __UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any: __UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]: __UpperCAmelCase , 
__UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a ) __UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a ) __UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a ) __UpperCAmelCase = model.apply( {'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample assert sample.shape == latents.shape __UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__a , __a , atol=1e-2 )
654
0
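A toy end-to-end use of the same `torch.onnx.export` call pattern as the VAE-decoder export script above, assuming a PyTorch recent enough that the deprecated `use_external_data_format` and `enable_onnx_checker` arguments are gone; the model, file name, and shapes are arbitrary:

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1)).eval()
dummy_input = torch.randn(1, 4)

torch.onnx.export(
    model,
    (dummy_input,),
    "toy_model.onnx",
    input_names=["sample"],
    output_names=["out"],
    dynamic_axes={"sample": {0: "batch"}},  # allow a variable batch size
    do_constant_folding=True,
    opset_version=14,
)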
'''simple docstring'''
def lowerCAmelCase ( length : int = 5_0 ):
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(F"""{lowerCAmelCase() = }""")
708
'''simple docstring''' import argparse import os import re import packaging.version __lowerCAmelCase : Optional[int] = "examples/" __lowerCAmelCase : Dict = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } __lowerCAmelCase : List[str] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } __lowerCAmelCase : int = "README.md" def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ): """simple docstring""" with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern] __UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ ) __UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ): """simple docstring""" for folder, directories, fnames in os.walk(UpperCamelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' ) def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if not patch: update_version_in_examples(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = '''🤗 Transformers currently provides the following architectures''' __UpperCAmelCase = '''1. Want to contribute a new model?''' with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCAmelCase = f.readlines() # Find the start of the list. __UpperCAmelCase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __UpperCAmelCase = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __UpperCAmelCase = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(UpperCamelCase__ ) def lowerCAmelCase ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0] return packaging.version.parse(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : Any=False ): """simple docstring""" __UpperCAmelCase = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __UpperCAmelCase = default_version.base_version elif patch: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = default_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = get_version() __UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __UpperCAmelCase = current_version.base_version # Check with the user we got that right. __UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(UpperCamelCase__ ) == 0: __UpperCAmelCase = dev_version print(f"""Updating version to {version}.""" ) global_version_update(UpperCamelCase__ ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") __lowerCAmelCase : Tuple = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
654
0
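The tile-counting dynamic programme above agrees with the simpler linear recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1, reading the problem as filling a length-n row with blocks of length 1 to 4 (condition on the offset and length of the first block longer than 1); a quick cross-check of that claim:

def ways_direct(n):
    # f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), f(0) = 1, f(k) = 0 for k < 0
    f = [0] * (n + 1)
    f[0] = 1
    for i in range(1, n + 1):
        f[i] = sum(f[i - k] for k in range(1, 5) if i - k >= 0)
    return f[n]


def ways_tiling(length):
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


assert all(ways_tiling(n) == ways_direct(n) for n in range(15))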
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase ): a_ = 4_2 class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase , UpperCAmelCase ): @register_to_config def __init__( self : List[str] , __a : int = 3_2 , __a : int = 6_4 , __a : int = 2_0 , __a : int = 7_6_8 , __a : Union[str, Any]=7_7 , __a : Any=4 , __a : float = 0.0 , __a : str = "silu" , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[str] = "linear" , __a : Optional[str] = "prd" , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[int] = None , ) -> Union[str, Any]: super().__init__() __UpperCAmelCase = num_attention_heads __UpperCAmelCase = attention_head_dim __UpperCAmelCase = num_attention_heads * attention_head_dim __UpperCAmelCase = additional_embeddings __UpperCAmelCase = time_embed_dim or inner_dim __UpperCAmelCase = embedding_proj_dim or embedding_dim __UpperCAmelCase = clip_embed_dim or embedding_dim __UpperCAmelCase = Timesteps(__a , __a , 0 ) __UpperCAmelCase = TimestepEmbedding(__a , __a , out_dim=__a , act_fn=__a ) __UpperCAmelCase = nn.Linear(__a , __a ) if embedding_proj_norm_type is None: __UpperCAmelCase = None elif embedding_proj_norm_type == "layer": __UpperCAmelCase = nn.LayerNorm(__a ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) __UpperCAmelCase = nn.Linear(__a , __a ) if encoder_hid_proj_type is None: __UpperCAmelCase = None elif encoder_hid_proj_type == "linear": __UpperCAmelCase = nn.Linear(__a , __a ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) __UpperCAmelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __a ) ) if added_emb_type == "prd": __UpperCAmelCase = nn.Parameter(torch.zeros(1 , 1 , __a ) ) elif added_emb_type is None: __UpperCAmelCase = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" ) __UpperCAmelCase = nn.ModuleList( [ BasicTransformerBlock( __a , __a , __a , dropout=__a , activation_fn='''gelu''' , attention_bias=__a , ) for d in range(__a ) ] ) if norm_in_type == "layer": __UpperCAmelCase = nn.LayerNorm(__a ) elif norm_in_type is None: __UpperCAmelCase = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) __UpperCAmelCase = nn.LayerNorm(__a ) __UpperCAmelCase = nn.Linear(__a , __a ) __UpperCAmelCase = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 ) causal_attention_mask.triu_(1 ) __UpperCAmelCase = causal_attention_mask[None, ...] 
self.register_buffer('''causal_attention_mask''' , __a , persistent=__a ) __UpperCAmelCase = nn.Parameter(torch.zeros(1 , __a ) ) __UpperCAmelCase = nn.Parameter(torch.zeros(1 , __a ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def snake_case__ ( self : Dict ) -> Dict[str, AttentionProcessor]: __UpperCAmelCase = {} def fn_recursive_add_processors(__a : str , __a : torch.nn.Module , __a : Dict[str, AttentionProcessor] ): if hasattr(__a , '''set_processor''' ): __UpperCAmelCase = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , __a , __a ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__a , __a , __a ) return processors def snake_case__ ( self : Optional[Any] , __a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Any: __UpperCAmelCase = len(self.attn_processors.keys() ) if isinstance(__a , __a ) and len(__a ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(__a )} does not match the""" f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(__a : str , __a : torch.nn.Module , __a : Union[str, Any] ): if hasattr(__a , '''set_processor''' ): if not isinstance(__a , __a ): module.set_processor(__a ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , __a , __a ) for name, module in self.named_children(): fn_recursive_attn_processor(__a , __a , __a ) def snake_case__ ( self : Any ) -> int: self.set_attn_processor(AttnProcessor() ) def snake_case__ ( self : List[str] , __a : str , __a : Union[torch.Tensor, float, int] , __a : torch.FloatTensor , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.BoolTensor] = None , __a : bool = True , ) -> str: __UpperCAmelCase = hidden_states.shape[0] __UpperCAmelCase = timestep if not torch.is_tensor(__a ): __UpperCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__a ) and len(timesteps.shape ) == 0: __UpperCAmelCase = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __UpperCAmelCase = timesteps * torch.ones(__a , dtype=timesteps.dtype , device=timesteps.device ) __UpperCAmelCase = self.time_proj(__a ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__UpperCAmelCase = timesteps_projected.to(dtype=self.dtype ) __UpperCAmelCase = self.time_embedding(__a ) if self.embedding_proj_norm is not None: __UpperCAmelCase = self.embedding_proj_norm(__a ) __UpperCAmelCase = self.embedding_proj(__a ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __UpperCAmelCase = self.encoder_hidden_states_proj(__a ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) __UpperCAmelCase = self.proj_in(__a ) __UpperCAmelCase = self.positional_embedding.to(hidden_states.dtype ) __UpperCAmelCase = [] __UpperCAmelCase = 0 if encoder_hidden_states is not None: additional_embeds.append(__a ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __UpperCAmelCase = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __UpperCAmelCase = hidden_states[:, None, :] __UpperCAmelCase = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __UpperCAmelCase = self.prd_embedding.to(hidden_states.dtype ).expand(__a , -1 , -1 ) additional_embeds.append(__a ) __UpperCAmelCase = torch.cat( __a , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __UpperCAmelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __UpperCAmelCase = F.pad( __a , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __UpperCAmelCase = hidden_states + positional_embeddings if attention_mask is not None: __UpperCAmelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0 __UpperCAmelCase = F.pad(__a , (0, self.additional_embeddings) , value=0.0 ) __UpperCAmelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __UpperCAmelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __UpperCAmelCase = self.norm_in(__a ) for block in self.transformer_blocks: __UpperCAmelCase = block(__a , attention_mask=__a ) __UpperCAmelCase = self.norm_out(__a ) if self.prd_embedding is not None: __UpperCAmelCase = hidden_states[:, -1] else: __UpperCAmelCase = hidden_states[:, additional_embeddings_len:] __UpperCAmelCase = self.proj_to_clip_embeddings(__a ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__a ) def snake_case__ ( self : str , __a : int ) -> str: __UpperCAmelCase = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
709
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : Tuple ): """simple docstring""" # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection __UpperCAmelCase = len(UpperCamelCase__ ) __UpperCAmelCase = max(UpperCamelCase__ ) __UpperCAmelCase = min(UpperCamelCase__ ) # create the counting array __UpperCAmelCase = coll_max + 1 - coll_min __UpperCAmelCase = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , UpperCamelCase__ ): __UpperCAmelCase = counting_arr[i] + counting_arr[i - 1] # create the output collection __UpperCAmelCase = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , UpperCamelCase__ ) ): __UpperCAmelCase = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def lowerCAmelCase ( UpperCamelCase__ : Any ): """simple docstring""" return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt" __lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip() __lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")] print(counting_sort(unsorted))
654
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A : @staticmethod def snake_case__ ( *__a : Dict , **__a : Tuple ) -> int: pass @is_pipeline_test @require_torch @require_vision class A ( unittest.TestCase ): a_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def snake_case__ ( self : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : Optional[Any] ) -> List[str]: __UpperCAmelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) __UpperCAmelCase = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def snake_case__ ( self : List[str] , __a : Tuple , __a : Any ) -> Dict: __UpperCAmelCase = vqa_pipeline(__a , top_k=1 ) self.assertEqual( __a , [ [{'''score''': ANY(__a ), '''answer''': ANY(__a )}], [{'''score''': ANY(__a ), '''answer''': ANY(__a )}], ] , ) @require_torch def snake_case__ ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) __UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __UpperCAmelCase = '''How many cats are there?''' __UpperCAmelCase = vqa_pipeline(image=__a , question='''How many cats are there?''' , top_k=2 ) self.assertEqual( __a , [{'''score''': ANY(__a ), '''answer''': ANY(__a )}, {'''score''': ANY(__a ), '''answer''': ANY(__a )}] ) __UpperCAmelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( __a , [{'''score''': ANY(__a ), '''answer''': ANY(__a )}, {'''score''': ANY(__a ), '''answer''': ANY(__a )}] ) @slow @require_torch def snake_case__ ( self : Optional[Any] ) -> List[Any]: __UpperCAmelCase = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' ) __UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' __UpperCAmelCase = '''How many cats are there?''' __UpperCAmelCase = vqa_pipeline(image=__a , question=__a , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] ) __UpperCAmelCase = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}] ) __UpperCAmelCase = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question answering not implemented in TF''' ) def snake_case__ ( self : Tuple ) -> str: pass
710
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase ( UpperCamelCase__ : str = "AAPL" ): """simple docstring""" __UpperCAmelCase = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __UpperCAmelCase = BeautifulSoup(requests.get(UpperCamelCase__ ).text , '''html.parser''' ) __UpperCAmelCase = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
654
0
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A ( nn.Module ): def __init__( self : Tuple , __a : nn.Module , __a : int ) -> Optional[int]: super().__init__() __UpperCAmelCase = module __UpperCAmelCase = nn.Sequential( nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , ) __UpperCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=__a ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def snake_case__ ( self : List[Any] , __a : List[Any] , *__a : Tuple , **__a : Dict ) -> str: return self.module(__a , *__a , **__a ) + self.adapter(__a ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module a_ = '''bigscience/bloom-1b7''' # Constant values a_ = 2.109_659_552_692_574 a_ = '''Hello my name is''' a_ = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) a_ = 1_0 def snake_case__ ( self : str ) -> str: # Models and tokenizer __UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name ) class A ( UpperCAmelCase ): def snake_case__ ( self : Dict ) -> List[str]: super().setUp() # Models and tokenizer __UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) __UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' ) def snake_case__ ( self : str ) -> Tuple: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Optional[int] ) -> List[str]: __UpperCAmelCase = self.model_abit.config self.assertTrue(hasattr(__a , '''quantization_config''' ) ) __UpperCAmelCase = config.to_dict() __UpperCAmelCase = config.to_diff_dict() __UpperCAmelCase = config.to_json_string() def snake_case__ ( self : Tuple ) -> Any: from bitsandbytes.nn import Paramsabit __UpperCAmelCase = self.model_fpaa.get_memory_footprint() __UpperCAmelCase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __UpperCAmelCase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__a , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def snake_case__ ( self : List[Any] ) -> Dict: __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def snake_case__ ( self : Union[str, Any] ) -> str: __UpperCAmelCase = BitsAndBytesConfig() __UpperCAmelCase = True __UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , device_map='''auto''' ) __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ) __UpperCAmelCase = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS ) def snake_case__ ( self : Optional[Any] ) -> str: with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__a ) def snake_case__ ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase = BitsAndBytesConfig() with self.assertRaises(__a ): __UpperCAmelCase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=__a , load_in_abit=__a , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def snake_case__ ( self : List[Any] ) -> Optional[int]: with self.assertRaises(__a ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__a ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__a ): # Tries with a 
`device`
            self.model_abit.to(torch.device('''cuda:0''' ) )

        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(__a ):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )

        __UpperCAmelCase = self.model_fpaa.to(torch.floataa )
        __UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )

        # Check this does not throw an error
        __UpperCAmelCase = self.model_fpaa.to('''cpu''' )

        # Check this does not throw an error
        __UpperCAmelCase = self.model_fpaa.half()

        # Check this does not throw an error
        __UpperCAmelCase = self.model_fpaa.float()

    def snake_case__ ( self : str ) -> Dict:
        __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__a , device_map='''auto''' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
    @classmethod
    def snake_case__ ( cls : int ) -> Any:
        __UpperCAmelCase = '''t5-small'''
        __UpperCAmelCase = '''google/flan-t5-small'''  # flan-t5 uses dense-act instead of dense-relu-dense
        __UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
        __UpperCAmelCase = '''Translate in German: Hello, my dog is cute'''

    def snake_case__ ( self : Dict ) -> str:
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self : List[Any] ) -> Dict:
        from transformers import TaForConditionalGeneration

        __UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
        __UpperCAmelCase = None

        # test with `t5-small`
        __UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
        __UpperCAmelCase = model.generate(**__a )

        # test with `flan-t5-small`
        __UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map='''auto'''
        )
        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
        __UpperCAmelCase = model.generate(**__a )
        __UpperCAmelCase = modules

    def snake_case__ ( self : str ) -> List[Any]:
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        __UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )

        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
        __UpperCAmelCase = model.generate(**__a )

        # test with `flan-t5-small`
        __UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=__a , device_map='''auto'''
        )
        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
        __UpperCAmelCase = model.generate(**__a )


class A ( UpperCAmelCase ):
    def snake_case__ ( self : str ) -> List[Any]:
        super().setUp()
        # model_name
        __UpperCAmelCase = '''bigscience/bloom-560m'''
        __UpperCAmelCase = '''t5-small'''

        # Different types of model
        __UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
        # Sequence classification model
        __UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=__a , device_map='''auto'''
        )
        # CausalLM model
        __UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
        # Seq2seq model
        __UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=__a , device_map='''auto'''
        )

    def snake_case__ ( self : Any ) -> int:
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self : str ) -> Optional[int]:
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )


class A ( UpperCAmelCase ):
    def snake_case__ ( self : str ) -> Optional[Any]:
        super().setUp()

    def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def snake_case__ ( self : int ) -> List[str]:
        __UpperCAmelCase = pipeline(
            '''text-generation''' ,
            model=self.model_name ,
            model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} ,
            max_new_tokens=self.MAX_NEW_TOKENS ,
        )

        # Real second forward pass
        __UpperCAmelCase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )


@require_torch_multi_gpu
class A ( UpperCAmelCase ):
    def snake_case__ ( self : int ) -> int:
        super().setUp()

    def snake_case__ ( self : Dict ) -> Any:
        __UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=__a , device_map='''balanced'''
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )

        # Check that inference pass works on the model
        __UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )

        # Second real batch
        __UpperCAmelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )


class A ( UpperCAmelCase ):
    def snake_case__ ( self : Union[str, Any] ) -> int:
        __UpperCAmelCase = '''facebook/opt-350m'''
        super().setUp()

    def snake_case__ ( self : Optional[Any] ) -> List[str]:
        if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
            return

        # Step 1: freeze all parameters
        __UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )

        for param in model.parameters():
            __UpperCAmelCase = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __UpperCAmelCase = param.data.to(torch.floataa )

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(__a ) ):
                __UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 )
                __UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 )
                __UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 )

        # Step 3: dummy batch
        __UpperCAmelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __UpperCAmelCase = model.forward(**__a )
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(__a , __a ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(__a , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )


class A ( UpperCAmelCase ):
    a_ = '''gpt2-xl'''
    a_ = 3.3_191_854_854_152_187
711
'''simple docstring'''

from __future__ import annotations

from statistics import mean


def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
    """simple docstring"""
    __UpperCAmelCase = [0] * no_of_processes
    __UpperCAmelCase = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(UpperCamelCase__ ):
        __UpperCAmelCase = burst_time[i]
    __UpperCAmelCase = []
    __UpperCAmelCase = 0
    __UpperCAmelCase = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        __UpperCAmelCase = []
        __UpperCAmelCase = -1
        for i in range(UpperCamelCase__ ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(UpperCamelCase__ )
        if len(UpperCamelCase__ ) > 0:
            __UpperCAmelCase = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    __UpperCAmelCase = i
            total_time += burst_time[target_process]
            completed += 1
            __UpperCAmelCase = 0
            __UpperCAmelCase = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def lowerCAmelCase ( UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[int] ):
    """simple docstring"""
    __UpperCAmelCase = [0] * no_of_processes
    for i in range(UpperCamelCase__ ):
        __UpperCAmelCase = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    __lowerCAmelCase : List[Any] = 4
    __lowerCAmelCase : List[Any] = [2, 5, 3, 7]
    __lowerCAmelCase : Tuple = [0, 0, 0, 0]
    __lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    __lowerCAmelCase : Dict = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
            F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
        )
    print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
    print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
654
0
'''simple docstring'''

import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(UpperCAmelCase ) , '''Tatoeba directory does not exist.''' )
class A ( unittest.TestCase ):
    @cached_property
    def snake_case__ ( self : Dict ) -> str:
        __UpperCAmelCase = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=__a )

    @slow
    def snake_case__ ( self : List[Any] ) -> Tuple:
        self.resolver.convert_models(['''heb-eng'''] )

    @slow
    def snake_case__ ( self : List[Any] ) -> Optional[int]:
        __UpperCAmelCase , __UpperCAmelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=__a )
        assert mmeta["long_pair"] == "heb-eng"
712
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]: requires_backends(cls , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : int , **__a : int ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict: requires_backends(cls , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ): """simple docstring""" requires_backends(UpperCamelCase__ , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] 
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = 
['''torch'''] def __init__( self : Any , *__a : str , **__a : Any ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str: requires_backends(cls 
, ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Any , **__a : int ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple: requires_backends(cls , ['''torch'''] ) 
@classmethod def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int: requires_backends(self , 
['''torch'''] ) @classmethod def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str: requires_backends(cls , ['''torch'''] ) class A ( metaclass=UpperCAmelCase ): a_ = ['''torch'''] def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]: requires_backends(self , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any: requires_backends(cls , ['''torch'''] ) @classmethod def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]: requires_backends(cls , ['''torch'''] )
654
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCAmelCase : Tuple = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class A ( unittest.TestCase ): a_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: a_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: a_ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def snake_case__ ( self : Union[str, Any] ) -> Dict: __UpperCAmelCase = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' ) __UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] ) __UpperCAmelCase = text_classifier('''This is great !''' , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}] ) __UpperCAmelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}], [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}], ] , ) __UpperCAmelCase = text_classifier('''This is great !''' , top_k=1 ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] ) # Legacy behavior __UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=__a ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] ) __UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}]] ) __UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}], [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_1''', '''score''': 0.4_9_6}], ] , ) __UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ {'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, {'''label''': '''LABEL_0''', '''score''': 0.5_0_4}, ] , ) @require_torch def snake_case__ ( self : Optional[Any] ) -> Tuple: import torch __UpperCAmelCase = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , ) __UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] ) @require_tf def snake_case__ ( self : int ) -> Tuple: __UpperCAmelCase = 
pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' ) __UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_0_4}] ) @slow @require_torch def snake_case__ ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase = pipeline('''text-classification''' ) __UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) __UpperCAmelCase = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) __UpperCAmelCase = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] ) @slow @require_tf def snake_case__ ( self : List[Any] ) -> Dict: __UpperCAmelCase = pipeline('''text-classification''' , framework='''tf''' ) __UpperCAmelCase = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) __UpperCAmelCase = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) __UpperCAmelCase = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(__a ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_8_8}] ) def snake_case__ ( self : List[Any] , __a : List[Any] , __a : Optional[int] , __a : Any ) -> List[Any]: __UpperCAmelCase = TextClassificationPipeline(model=__a , tokenizer=__a ) return text_classifier, ["HuggingFace is in", "This is another test"] def snake_case__ ( self : int , __a : List[str] , __a : str ) -> Dict: __UpperCAmelCase = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __UpperCAmelCase = '''HuggingFace is in''' __UpperCAmelCase = text_classifier(__a ) self.assertEqual(nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}] ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) __UpperCAmelCase = ['''HuggingFace is in ''', '''Paris is in France'''] __UpperCAmelCase = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}, {'''label''': ANY(__a ), '''score''': ANY(__a )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __UpperCAmelCase = text_classifier(__a , top_k=__a ) __UpperCAmelCase = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__a ) , [[{'''label''': ANY(__a ), '''score''': ANY(__a )}] * N, [{'''label''': ANY(__a ), '''score''': ANY(__a )}] * N] , ) __UpperCAmelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''} __UpperCAmelCase = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , {'''label''': ANY(__a ), '''score''': ANY(__a )} , ) self.assertTrue(outputs['''label'''] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
__UpperCAmelCase = [['''HuggingFace is in ''', '''Paris is in France''']] with self.assertRaises(__a ): text_classifier(__a ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __UpperCAmelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] ) self.assertEqual( nested_simplify(__a ) , [{'''label''': ANY(__a ), '''score''': ANY(__a )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
713
'''simple docstring'''

import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
654
0
import glob
import os
import random
from string import ascii_lowercase, digits

import cva

__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1  # (0 is vertical, 1 is horizontal)


def lowerCAmelCase ( ):
    """simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
    print('''Processing...''' )
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    for index, image in enumerate(UpperCamelCase__ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCAmelCase = random_chars(3_2 )
        __UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        __UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
        __UpperCAmelCase = []
        for anno in new_annos[index]:
            __UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(UpperCamelCase__ )
        with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )


def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
    """simple docstring"""
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
        __UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(UpperCamelCase__ ) as in_file:
            __UpperCAmelCase = in_file.readlines()
        __UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
        __UpperCAmelCase = []
        for obj_list in obj_lists:
            __UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ]
            )
        if not boxes:
            continue
        img_paths.append(UpperCamelCase__ )
        labels.append(UpperCamelCase__ )
    return img_paths, labels


def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
    """simple docstring"""
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    __UpperCAmelCase = []
    for idx in range(len(UpperCamelCase__ ) ):
        __UpperCAmelCase = []
        __UpperCAmelCase = img_list[idx]
        path_list.append(UpperCamelCase__ )
        __UpperCAmelCase = anno_list[idx]
        __UpperCAmelCase = cva.imread(UpperCamelCase__ )
        if flip_type == 1:
            __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
            for bbox in img_annos:
                __UpperCAmelCase = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            __UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
            for bbox in img_annos:
                __UpperCAmelCase = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(UpperCamelCase__ )
        new_imgs_list.append(UpperCamelCase__ )
    return new_imgs_list, new_annos_lists, path_list


def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    __UpperCAmelCase = ascii_lowercase + digits
    return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )


if __name__ == "__main__":
    main()
    print("DONE ✅")
714
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


__lowerCAmelCase : Optional[Any] = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Union[str, Any] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
654
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__lowerCAmelCase : Tuple = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Any = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    __lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ): """simple docstring""" __UpperCAmelCase = {} if train_file is not None: __UpperCAmelCase = [train_file] if eval_file is not None: __UpperCAmelCase = [eval_file] if test_file is not None: __UpperCAmelCase = [test_file] __UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ ) __UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() ) __UpperCAmelCase = features_name.pop(UpperCamelCase__ ) __UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) ) __UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )} __UpperCAmelCase = tokenizer.model_input_names __UpperCAmelCase = {} if len(UpperCamelCase__ ) == 1: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , ) elif len(UpperCamelCase__ ) == 2: for k in files.keys(): __UpperCAmelCase = ds[k].map( lambda UpperCamelCase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCAmelCase = labelaid[ex[label_name]] yield (d, label) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __UpperCAmelCase = ( tf.data.Dataset.from_generator( UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if 
datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid __lowerCAmelCase : List[Any] = logging.getLogger(__name__) @dataclass class A : a_ = field(metadata={'''help''': '''Which column contains the label'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} ) a_ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : a_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a_ = field( default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def lowerCAmelCase ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict: __UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __UpperCAmelCase = TFTrainer( model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __UpperCAmelCase = trainer.evaluate() __UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(UpperCamelCase__ ) return results if __name__ == "__main__": main()
654
0
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __lowerCAmelCase : Optional[int] = NewType("DataClass", Any) __lowerCAmelCase : Any = NewType("DataClassType", Any) def lowerCAmelCase ( UpperCamelCase__ : Any ): """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def lowerCAmelCase ( UpperCamelCase__ : list ): """simple docstring""" __UpperCAmelCase = {str(UpperCamelCase__ ): choice for choice in choices} return lambda UpperCamelCase__ : str_to_choice.get(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase ( *, UpperCamelCase__ : Union[str, List[str]] = None , UpperCamelCase__ : str = None , UpperCamelCase__ : Any = dataclasses.MISSING , UpperCamelCase__ : Callable[[], Any] = dataclasses.MISSING , UpperCamelCase__ : dict = None , **UpperCamelCase__ : Optional[Any] , ): """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls __UpperCAmelCase = {} if aliases is not None: __UpperCAmelCase = aliases if help is not None: __UpperCAmelCase = help return dataclasses.field(metadata=UpperCamelCase__ , default=UpperCamelCase__ , default_factory=UpperCamelCase__ , **UpperCamelCase__ ) class A ( UpperCAmelCase ): a_ = 4_2 def __init__( self : Optional[int] , __a : Union[DataClassType, Iterable[DataClassType]] , **__a : Any ) -> Optional[int]: # To make the default appear when using --help if "formatter_class" not in kwargs: __UpperCAmelCase = ArgumentDefaultsHelpFormatter super().__init__(**__a ) if dataclasses.is_dataclass(__a ): __UpperCAmelCase = [dataclass_types] __UpperCAmelCase = list(__a ) for dtype in self.dataclass_types: self._add_dataclass_arguments(__a ) @staticmethod def snake_case__ ( __a : ArgumentParser , __a : dataclasses.Field ) -> Optional[Any]: __UpperCAmelCase = f"""--{field.name}""" __UpperCAmelCase = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , __a ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) __UpperCAmelCase = kwargs.pop('''aliases''' , [] ) if isinstance(__a , __a ): __UpperCAmelCase = [aliases] __UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type ) if origin_type is Union or (hasattr(__a , '''UnionType''' ) and isinstance(__a , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(__a ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' f""" Problem encountered in field '{field.name}'.""" ) if type(__a ) not in field.type.__args__: # filter `str` in Union __UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] __UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) __UpperCAmelCase = ( field.type.__args__[0] if isinstance(__a , field.type.__args__[1] ) else field.type.__args__[1] ) __UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) __UpperCAmelCase = {} if origin_type is Literal or (isinstance(field.type , __a ) and issubclass(field.type , __a )): if origin_type is Literal: __UpperCAmelCase = field.type.__args__ else: __UpperCAmelCase = [x.value for x in field.type] __UpperCAmelCase = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: __UpperCAmelCase = field.default else: __UpperCAmelCase = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument __UpperCAmelCase = copy(__a ) # Hack because type=bool in argparse does not behave as we want. __UpperCAmelCase = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. __UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way __UpperCAmelCase = default # This tells argparse we accept 0 or 1 value after --field_name __UpperCAmelCase = '''?''' # This is the value that will get picked if we do --field_name (without value) __UpperCAmelCase = True elif isclass(__a ) and issubclass(__a , __a ): __UpperCAmelCase = field.type.__args__[0] __UpperCAmelCase = '''+''' if field.default_factory is not dataclasses.MISSING: __UpperCAmelCase = field.default_factory() elif field.default is dataclasses.MISSING: __UpperCAmelCase = True else: __UpperCAmelCase = field.type if field.default is not dataclasses.MISSING: __UpperCAmelCase = field.default elif field.default_factory is not dataclasses.MISSING: __UpperCAmelCase = field.default_factory() else: __UpperCAmelCase = True parser.add_argument(__a , *__a , **__a ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! 
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): __UpperCAmelCase = False parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__a ) def snake_case__ ( self : Optional[int] , __a : DataClassType ) -> Optional[Any]: if hasattr(__a , '''_argument_group_name''' ): __UpperCAmelCase = self.add_argument_group(dtype._argument_group_name ) else: __UpperCAmelCase = self try: __UpperCAmelCase = get_type_hints(__a ) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__a ): __UpperCAmelCase = '''.'''.join(map(__a , sys.version_info[:3] ) ) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(__a ): if not field.init: continue __UpperCAmelCase = type_hints[field.name] self._parse_dataclass_field(__a , __a ) def snake_case__ ( self : str , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=True , __a : int=None , __a : Optional[Any]=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): __UpperCAmelCase = [] if args_filename: args_files.append(Path(__a ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values __UpperCAmelCase = ArgumentParser() args_file_parser.add_argument(__a , type=__a , action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) __UpperCAmelCase , __UpperCAmelCase = args_file_parser.parse_known_args(args=__a ) __UpperCAmelCase = vars(__a ).get(args_file_flag.lstrip('''-''' ) , __a ) if cmd_args_file_paths: args_files.extend([Path(__a ) for p in cmd_args_file_paths] ) __UpperCAmelCase = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last __UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:] __UpperCAmelCase , __UpperCAmelCase = self.parse_known_args(args=__a ) __UpperCAmelCase = [] for dtype in self.dataclass_types: __UpperCAmelCase = {f.name for f in dataclasses.fields(__a ) if f.init} __UpperCAmelCase = {k: v for k, v in vars(__a ).items() if k in keys} for k in keys: delattr(__a , __a ) __UpperCAmelCase = dtype(**__a ) outputs.append(__a ) if len(namespace.__dict__ ) > 0: # additional namespace. 
outputs.append(__a ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" ) return (*outputs,) def snake_case__ ( self : Optional[int] , __a : Dict[str, Any] , __a : bool = False ) -> Tuple[DataClass, ...]: __UpperCAmelCase = set(args.keys() ) __UpperCAmelCase = [] for dtype in self.dataclass_types: __UpperCAmelCase = {f.name for f in dataclasses.fields(__a ) if f.init} __UpperCAmelCase = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) __UpperCAmelCase = dtype(**__a ) outputs.append(__a ) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(__a )}""" ) return tuple(__a ) def snake_case__ ( self : Optional[int] , __a : str , __a : bool = False ) -> Tuple[DataClass, ...]: with open(Path(__a ) , encoding='''utf-8''' ) as open_json_file: __UpperCAmelCase = json.loads(open_json_file.read() ) __UpperCAmelCase = self.parse_dict(__a , allow_extra_keys=__a ) return tuple(__a ) def snake_case__ ( self : Optional[int] , __a : str , __a : bool = False ) -> Tuple[DataClass, ...]: __UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(__a ).read_text() ) , allow_extra_keys=__a ) return tuple(__a )
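# Hedged usage sketch for the parser above (upstream: transformers.HfArgumentParser).
# Each dataclass field becomes a --flag, and a bool field that defaults to True also
# gets the --no_<name> complement added right after the main flag (see the
# store_false branch above). The dataclass below is made up for the demo.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    learning_rate: float = field(default=3e-4, metadata={"help": "Optimiser step size."})
    use_cache: bool = field(default=True, metadata={"help": "Toggle KV caching."})


demo_parser = HfArgumentParser(DemoArguments)
(demo_args,) = demo_parser.parse_args_into_dataclasses(
    args=["--learning_rate", "1e-3", "--no_use_cache"]
)
assert demo_args.learning_rate == 1e-3 and demo_args.use_cache is False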
716
'''simple docstring''' from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class A : def __init__( self : List[Any] , __a : Any , ) -> Dict: __UpperCAmelCase = parent __UpperCAmelCase = 1_3 __UpperCAmelCase = 7 __UpperCAmelCase = True __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = True __UpperCAmelCase = 9_9 __UpperCAmelCase = 3_2 __UpperCAmelCase = 2 __UpperCAmelCase = 4 __UpperCAmelCase = 3_7 __UpperCAmelCase = '''gelu''' __UpperCAmelCase = 0.1 __UpperCAmelCase = 0.1 __UpperCAmelCase = 5_1_2 __UpperCAmelCase = 1_6 __UpperCAmelCase = 2 __UpperCAmelCase = 0.0_2 __UpperCAmelCase = 3 __UpperCAmelCase = 4 __UpperCAmelCase = None def snake_case__ ( self : Optional[int] ) -> Dict: __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase = None if self.use_input_mask: __UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any: __UpperCAmelCase = TFDistilBertModel(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) __UpperCAmelCase = [input_ids, input_mask] __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int: __UpperCAmelCase = TFDistilBertForMaskedLM(config=__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict: __UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a ) 
__UpperCAmelCase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForSequenceClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str: __UpperCAmelCase = self.num_choices __UpperCAmelCase = TFDistilBertForMultipleChoice(__a ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int: __UpperCAmelCase = self.num_labels __UpperCAmelCase = TFDistilBertForTokenClassification(__a ) __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} __UpperCAmelCase = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : str ) -> Any: __UpperCAmelCase = self.prepare_config_and_inputs() ((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs __UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): a_ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) a_ = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) a_ = False a_ = False def snake_case__ ( self : Any ) -> Any: __UpperCAmelCase = TFDistilBertModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 ) def snake_case__ ( self : List[Any] ) -> Optional[int]: self.config_tester.run_common_tests() def snake_case__ ( self : Any ) -> str: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__a ) def snake_case__ ( self : Tuple ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__a ) def snake_case__ ( self : Union[str, Any] ) -> Any: __UpperCAmelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__a ) def snake_case__ ( self : Optional[Any] ) -> Dict: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a ) def snake_case__ ( self : List[str] ) -> List[Any]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__a ) @slow def snake_case__ ( self : Dict ) -> Tuple: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): __UpperCAmelCase = TFDistilBertModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @require_tf class A ( unittest.TestCase ): @slow def snake_case__ ( self : int ) -> Dict: __UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase = model(__a )[0] __UpperCAmelCase = [1, 6, 7_6_8] self.assertEqual(output.shape , __a ) __UpperCAmelCase = tf.constant( [ [ [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9], [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4], [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
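# Standalone illustration of the input tiling used by the multiple-choice test
# above: a (batch, seq_len) tensor is repeated once per answer candidate so the
# model sees shape (batch, num_choices, seq_len). The sizes below are arbitrary
# demo values, not taken from the tester.
import tensorflow as tf

batch_size, num_choices, seq_len = 2, 4, 6
demo_ids = tf.ones((batch_size, seq_len), dtype=tf.int32)
tiled_ids = tf.tile(tf.expand_dims(demo_ids, 1), (1, num_choices, 1))
assert tiled_ids.shape == (batch_size, num_choices, seq_len)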
654
0
'''simple docstring'''


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are an n-digit n-th power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(F"""{solution(10, 22) = }""")
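# Quick sanity check of the count above: 8**3 == 512 is a 3-digit third power
# and is counted, while no base >= 10 can ever qualify because 10**n always has
# n + 1 digits. With the default ranges the total is the known answer, 49.
assert len(str(8**3)) == 3
assert solution() == 49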
717
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __lowerCAmelCase : List[Any] = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
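# Self-contained toy version of the lazy-import pattern wired up above:
# attribute access triggers the real import on first use and caches the result.
# This is an illustrative stand-in, not the actual transformers._LazyModule.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for module_name, exported in self._import_structure.items():
            if attr in exported:
                value = getattr(importlib.import_module(module_name), attr)
                setattr(self, attr, value)  # cache so __getattr__ is not hit again
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


demo_module = ToyLazyModule("demo", {"math": ["sqrt"]})
assert demo_module.sqrt(9) == 3.0  # 'math' is only imported at this point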
654
0
def check_bipartite_dfs(graph: dict) -> bool:
    """Two-color the graph with a DFS and report whether no edge is monochromatic."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
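# Quick negative case for the check above: a triangle (odd cycle) cannot be
# two-colored, so the function must report False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False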
718
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
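# Minimal usage check for the config above: keyword overrides become plain
# attributes while unspecified fields keep the signature defaults. (The relative
# import above means this is meant to run inside the transformers package tree.)
config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
assert config.hidden_size == 512 and config.num_attention_heads == 16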
654
0
'''simple docstring'''
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure a freshly initialised qubit register on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
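# Variant of the experiment above: flipping the qubit with an X gate first
# should land every shot in state '1' instead of '0'. This assumes the same
# qiskit version as the snippet above, where qiskit.Aer and qiskit.execute
# are still available.
def single_qubit_flip_measure():
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.x(0)  # flip |0> -> |1>
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)  # expected: {'1': 1000}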
719
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Fraction of the speed of light carried by the given velocity."""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor for the given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the boost to an event four-vector (symbolic when none is given)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(F"""ct' = {four_vector[0]}""")
    print(F"""x' = {four_vector[1]}""")
    print(F"""y' = {four_vector[2]}""")
    print(F"""z' = {four_vector[3]}""")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"""\n{numerical_vector}""")
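# Worked numeric check of the formulas above at half the speed of light:
# beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) = 2 / sqrt(3), roughly 1.1547.
from math import isclose

assert isclose(beta(0.5 * c), 0.5)
assert isclose(gamma(0.5 * c), 2 / sqrt(3))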
654
0
'''simple docstring''' from collections.abc import Iterable from typing import Any class A : def __init__( self : Tuple , __a : int | None = None ) -> str: __UpperCAmelCase = value __UpperCAmelCase = None # Added in order to delete a node easier __UpperCAmelCase = None __UpperCAmelCase = None def __repr__( self : Optional[Any] ) -> str: from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class A : def __init__( self : int , __a : Node | None = None ) -> List[Any]: __UpperCAmelCase = root def __str__( self : Any ) -> str: return str(self.root ) def snake_case__ ( self : List[str] , __a : Node , __a : Node | None ) -> None: if new_children is not None: # reset its kids __UpperCAmelCase = node.parent if node.parent is not None: # reset its parent if self.is_right(__a ): # If it is the right children __UpperCAmelCase = new_children else: __UpperCAmelCase = new_children else: __UpperCAmelCase = new_children def snake_case__ ( self : List[str] , __a : Node ) -> bool: if node.parent and node.parent.right: return node == node.parent.right return False def snake_case__ ( self : str ) -> bool: return self.root is None def snake_case__ ( self : Optional[int] , __a : List[str] ) -> None: __UpperCAmelCase = Node(__a ) # create a new Node if self.empty(): # if Tree is empty __UpperCAmelCase = new_node # set its root else: # Tree is not empty __UpperCAmelCase = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: __UpperCAmelCase = new_node # We insert the new node in a leaf break else: __UpperCAmelCase = parent_node.left else: if parent_node.right is None: __UpperCAmelCase = new_node break else: __UpperCAmelCase = parent_node.right __UpperCAmelCase = parent_node def snake_case__ ( self : str , *__a : int ) -> None: for value in values: self.__insert(__a ) def snake_case__ ( self : Optional[int] , __a : Dict ) -> Node | None: if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: __UpperCAmelCase = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: __UpperCAmelCase = node.left if value < node.value else node.right return node def snake_case__ ( self : List[Any] , __a : Node | None = None ) -> Node | None: if node is None: if self.root is None: return None __UpperCAmelCase = self.root if not self.empty(): while node.right is not None: __UpperCAmelCase = node.right return node def snake_case__ ( self : Any , __a : Node | None = None ) -> Node | None: if node is None: __UpperCAmelCase = self.root if self.root is None: return None if not self.empty(): __UpperCAmelCase = self.root while node.left is not None: __UpperCAmelCase = node.left return node def snake_case__ ( self : Tuple , __a : int ) -> None: __UpperCAmelCase = self.search(__a ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__a , __a ) elif node.left is None: # Has only right children self.__reassign_nodes(__a , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__a , node.left ) else: __UpperCAmelCase = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore __UpperCAmelCase = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def snake_case__ ( self : Optional[Any] , __a : Node | None ) -> Iterable: if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def snake_case__ ( self : Any , __a : Optional[int]=None ) -> Any: if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def snake_case__ ( self : Any , __a : list , __a : Node | None ) -> None: if node: self.inorder(__a , node.left ) arr.append(node.value ) self.inorder(__a , node.right ) def snake_case__ ( self : Any , __a : int , __a : Node ) -> int: __UpperCAmelCase = [] self.inorder(__a , __a ) # append all values to list using inorder traversal return arr[k - 1] def lowerCAmelCase ( UpperCamelCase__ : Node | None ): """simple docstring""" __UpperCAmelCase = [] if curr_node is not None: __UpperCAmelCase = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7) __UpperCAmelCase = BinarySearchTree() for i in testlist: t.insert(UpperCamelCase__ ) # Prints all the elements of the list in order traversal print(UpperCamelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(UpperCamelCase__ ) print(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
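# Minimal standalone check of the invariant the class above relies on: an
# inorder walk of a binary search tree yields its keys in ascending order,
# which is exactly what the kth-smallest helper exploits. Same demo values
# as the main() routine above; the tiny node/insert helpers are demo-only.
class _DemoNode:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None


def _demo_insert(root, value):
    if root is None:
        return _DemoNode(value)
    if value < root.value:
        root.left = _demo_insert(root.left, value)
    else:
        root.right = _demo_insert(root.right, value)
    return root


def _demo_inorder(node):
    return [] if node is None else _demo_inorder(node.left) + [node.value] + _demo_inorder(node.right)


demo_root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    demo_root = _demo_insert(demo_root, v)
assert _demo_inorder(demo_root) == sorted((8, 3, 6, 1, 10, 14, 13, 4, 7))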
720
'''simple docstring''' import heapq import sys import numpy as np __lowerCAmelCase : Any = tuple[int, int] class A : def __init__( self : Optional[int] ) -> int: __UpperCAmelCase = [] __UpperCAmelCase = set() def snake_case__ ( self : Optional[Any] ) -> List[Any]: if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def snake_case__ ( self : Dict ) -> Optional[int]: return len(self.elements ) == 0 def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]: if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__a ) else: # update # print("update", item) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def snake_case__ ( self : int , __a : Any ) -> int: if item in self.set: self.set.remove(__a ) __UpperCAmelCase = [] ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def snake_case__ ( self : List[str] ) -> Dict: return self.elements[0][1] def snake_case__ ( self : Any ) -> List[str]: ((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements ) self.set.remove(__a ) return (priority, item) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # euclidean distance __UpperCAmelCase = np.array(UpperCamelCase__ ) __UpperCAmelCase = np.array(UpperCamelCase__ ) return np.linalg.norm(a - b ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # integer division by time variable return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ): """simple docstring""" # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ): """simple docstring""" __UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ ) return ans def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): """simple docstring""" __UpperCAmelCase = np.chararray((n, n) ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): __UpperCAmelCase = '''*''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (j, (n - 1) - i) in blocks: __UpperCAmelCase = '''#''' __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[goal] while x != start: ((__UpperCAmelCase) , (__UpperCAmelCase)) = x # print(x) __UpperCAmelCase = '''-''' __UpperCAmelCase = back_pointer[x] __UpperCAmelCase = '''-''' for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) __UpperCAmelCase = back_pointer[goal] while x != start: 
print(UpperCamelCase__ , end=''' ''' ) __UpperCAmelCase = back_pointer[x] print(UpperCamelCase__ ) sys.exit() def lowerCAmelCase ( UpperCamelCase__ : TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ): """simple docstring""" for itera in range(UpperCamelCase__ ): open_list[itera].remove_element(UpperCamelCase__ ) # print("s", s) # print("j", j) ((__UpperCAmelCase) , (__UpperCAmelCase)) = s __UpperCAmelCase = (x - 1, y) __UpperCAmelCase = (x + 1, y) __UpperCAmelCase = (x, y + 1) __UpperCAmelCase = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(UpperCamelCase__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(UpperCamelCase__ ) __UpperCAmelCase = -1 __UpperCAmelCase = float('''inf''' ) if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1: __UpperCAmelCase = g_function[s] + 1 __UpperCAmelCase = s if neighbours not in close_list_anchor: open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) ) if neighbours not in close_list_inad: for var in range(1 , UpperCamelCase__ ): if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ): open_list[j].put( UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list __lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase : List[Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase : Dict = make_common_ground() __lowerCAmelCase : int = blocks_blk # hyper parameters __lowerCAmelCase : Dict = 1 __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Union[str, Any] = 20 __lowerCAmelCase : Any = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase : Optional[Any] = (0, 0) __lowerCAmelCase : Any = (n - 1, n - 1) __lowerCAmelCase : Optional[int] = 1 def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = {start: 0, goal: float('''inf''' )} __UpperCAmelCase = {start: -1, goal: -1} __UpperCAmelCase = [] __UpperCAmelCase = set() for i in range(UpperCamelCase__ ): open_list.append(PriorityQueue() ) open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = [] __UpperCAmelCase = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , UpperCamelCase__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if 
open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_inad.append(UpperCamelCase__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase = open_list[0].top_show() visited.add(UpperCamelCase__ ) expand_state( UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) close_list_anchor.append(UpperCamelCase__ ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(UpperCamelCase__ ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
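# The priority computed by `key` above is the weighted A* value
# f_i(s) = g(s) + Wa * h_i(s). Quick standalone evaluation at the start cell
# with the same Manhattan heuristic, using Wa = 1 and goal = (19, 19) from the
# hyper parameters above; g(start) = 0, so the key equals the heuristic alone.
def demo_manhattan(p, goal=(19, 19)):
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


assert 0 + 1 * demo_manhattan((0, 0)) == 38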
654
0
'''simple docstring''' def lowerCAmelCase ( UpperCamelCase__ : Any ): """simple docstring""" return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def lowerCAmelCase ( UpperCamelCase__ : dict[int, list[int]] ): """simple docstring""" __UpperCAmelCase = 0 __UpperCAmelCase = len(UpperCamelCase__ ) # No of vertices in graph __UpperCAmelCase = [0] * n __UpperCAmelCase = [False] * n def dfs(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ): __UpperCAmelCase = True __UpperCAmelCase = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , id_ ) __UpperCAmelCase = min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge __UpperCAmelCase = min(low[at] , low[to] ) __UpperCAmelCase = [] for i in range(UpperCamelCase__ ): if not visited[i]: dfs(UpperCamelCase__ , -1 , UpperCamelCase__ , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
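# Self-contained restatement of the low-link bridge search above, with two
# quick checks: every edge of a path graph is a bridge, while a cycle has none.
# This is a sketch of the same technique, not the mangled routine itself.
def demo_find_bridges(graph: dict) -> list:
    n = len(graph)
    visited = [False] * n
    disc = [0] * n  # discovery time of each vertex
    low = [0] * n  # lowest discovery time reachable from the subtree
    bridges = []
    timer = [0]

    def dfs(at: int, parent: int) -> None:
        visited[at] = True
        disc[at] = low[at] = timer[0]
        timer[0] += 1
        for to in graph[at]:
            if to == parent:
                continue
            if not visited[to]:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if disc[at] < low[to]:  # no back edge bypasses (at, to): a bridge
                    bridges.append((at, to) if at < to else (to, at))
            else:
                low[at] = min(low[at], disc[to])

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return bridges


assert sorted(demo_find_bridges({0: [1], 1: [0, 2], 2: [1]})) == [(0, 1), (1, 2)]
assert demo_find_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1]}) == []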
721
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py __lowerCAmelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. __lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. __lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") __lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. __lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) __lowerCAmelCase : Optional[int] = [ ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"), ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"), ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"), ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"), ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"), ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), ( "zero-shot-object-detection", "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForZeroShotObjectDetection", ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"), ( "table-question-answering", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForTableQuestionAnswering", ), ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"), ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"), ( "next-sentence-prediction", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "AutoModelForNextSentencePrediction", ), ( "audio-frame-classification", "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), ( "document-question-answering", "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), ( "visual-question-answering", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ( 
"zero-shot-image-classification", "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForZeroShotImageClassification", ), ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"), ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"), ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"), ] def lowerCAmelCase ( UpperCamelCase__ : int ): """simple docstring""" __UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __UpperCAmelCase = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) __UpperCAmelCase = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): __UpperCAmelCase = None if _re_tf_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = tf_models __UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = flax_models __UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: __UpperCAmelCase = pt_models __UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: __UpperCAmelCase = True break # Try again after removing the last word in the name __UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] ) __UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) __UpperCAmelCase = list(UpperCamelCase__ ) all_models.sort() __UpperCAmelCase = {'''model_type''': all_models} __UpperCAmelCase = [pt_models[t] for t in all_models] __UpperCAmelCase = [tf_models[t] for t in all_models] __UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure __UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: __UpperCAmelCase = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: __UpperCAmelCase = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
__UpperCAmelCase = '''AutoTokenizer''' __UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase ( UpperCamelCase__ : List[str] ): """simple docstring""" __UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: __UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""] __UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names __UpperCAmelCase = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ): """simple docstring""" __UpperCAmelCase = get_frameworks_table() __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) __UpperCAmelCase = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ ) __UpperCAmelCase = Dataset.from_json(UpperCamelCase__ ) __UpperCAmelCase = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(UpperCamelCase__ ) ) } __UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. 
__UpperCAmelCase = sorted(table.keys() ) __UpperCAmelCase = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) __UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) ) if commit_sha is not None: __UpperCAmelCase = ( f"""Update with commit {commit_sha}\n\nSee: """ f"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: __UpperCAmelCase = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase ( ): """simple docstring""" __UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} __UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS __UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: __UpperCAmelCase = pipeline_tasks[key]['''pt'''] if isinstance(UpperCamelCase__ , (list, tuple) ): __UpperCAmelCase = model[0] __UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __UpperCAmelCase = ''', '''.join(UpperCamelCase__ ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' f"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") __lowerCAmelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
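# Typical invocations of this maintenance script, run from the repo root as the
# header note says (the token and sha values are placeholders, not real values):
#
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <HF_TOKEN> --commit_sha <COMMIT_SHA>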
654
0