| Column | Type | Values |
|---|---|---|
| code | string | lengths 86 – 54.5k |
| code_codestyle | int64 | 0 – 371 |
| style_context | string | lengths 87 – 49.2k |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 10_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                kwargs["""num_inference_steps"""] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )

    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ):
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3

    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3

    def test_full_loop_with_set_alpha_to_one( self ):
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one( self ):
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
| code_codestyle: 346 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
    '''Abstract base class for all constraints that can be applied during generation.'''
def __init__( self : Union[str, Any] ):
"""simple docstring"""
self.test()
    def test( self ):
"""simple docstring"""
        counter = 0
        completed = False
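        # drive the constraint with its own advance() suggestions until update() reports completion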
while not completed:
if counter == 1:
self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" )
            stepped , completed , reset = self.update(advance )
counter += 1
if counter > 1_00_00:
raise Exception("""update() does not fulfill the constraint.""" )
if self.remaining() != 0:
raise Exception("""Custom Constraint is not defined correctly.""" )
    @abstractmethod
    def advance( self ):
        '''Returns the token that would take this constraint one step closer to being fulfilled.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance( self , token_id : int ):
        '''Reads in a token and returns whether it creates progress.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update( self , token_id : int ):
        '''Reads in a token and returns the resulting (stepped, completed, reset) state change.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset( self ):
        '''Resets the state of this constraint to its initialization.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining( self ):
        '''Returns the number of remaining steps needed to complete this constraint.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy( self , stateful=False ):
        '''Creates a new instance of this constraint; if stateful, also copies its internal state.'''
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint ):
    '''Constraint enforcing that an ordered sequence of token ids appears in the output.'''
    def __init__( self , token_ids : List[int] ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    '''A trie over the candidate token-id sequences of a disjunctive constraint.'''
    def __init__( self , nested_token_ids : List[List[int]] , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                """Each list in `nested_token_ids` can't be a complete subset of another list, but is"""
                f''' {nested_token_ids}.''' )
        self.trie = root
    def next_tokens( self , current_seq ):
        '''The tokens that can extend current_seq while staying inside the trie.'''
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf( self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0

    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )

    def has_subsets( self , trie , nested_token_ids ):
        '''Returns whether the number of leaves differs from the number of words, i.e. some word is a subset of another.'''
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
class DisjunctiveConstraint(Constraint ):
    '''Constraint that is fulfilled by any one of several sequences of token ids.'''
    def __init__( self , nested_token_ids : List[List[int]] ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}''' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset

    def reset( self ):
        self.completed = False
        self.current_seq = []

    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy( self , stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    '''A class for beam scorers to track their progress through a list of constraints.'''
    def __init__( self , constraints : List[Constraint] ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
"""simple docstring"""
        add = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids : Optional[List[int]] ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress to the current
            # job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            """`constraint.update(token_id)` is not yielding incremental progress, """
                            """even though `constraint.does_advance(token_id)` is true.""" )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints )  # we never actually mutate the `self.constraints`
        # objects throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| style_context_codestyle: 346 | label: 1 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time in seconds allowed for each candidate program (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_WARNING = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_LICENSE = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
for candidate in candidates:
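                    # a test program is the candidate completion followed by its assert-based test case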
                    test_program = candidate + """\n""" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
completion_id[task_id] += 1
n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result) )
        correct , total = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
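# Unbiased pass@k estimator from "Evaluating Large Language Models Trained on Code": pass@k = 1 - C(n - c, k) / C(n, k).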
def estimate_pass_at_k(num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k), as a numerically stable product."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
| code_codestyle: 355 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
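# fill in your own OAuth credentials below before running this script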
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def get_all_tweets(screen_name ):
    """Download a user's recent tweets and write them to new_<screen_name>_tweets.csv."""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
        writer = csv.writer(f )
        writer.writerow(["""id""", """created_at""", """text"""] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| style_context_codestyle: 30 | label: 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
__A : List[str] = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast ):
    """A "fast" BigBird tokenizer backed by HuggingFace's tokenizers library, based on Unigram."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| code_codestyle: 120 |
'''simple docstring'''
from math import ceil
def solution( n : int = 10_01 ):
    '''Returns the sum of the numbers on the diagonals of an n-by-n number spiral
    (Project Euler problem 28).'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| style_context_codestyle: 120 | label: 1 |
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ):
    """simple docstring"""
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file , '''r''' ) as f:
        lines = f.readlines()
_snake_case = F"""class {class_name}("""
_snake_case = F"""{4 * ' '}def {test_name}("""
_snake_case = F"""{8 * ' '}{correct_line.split()[0]}"""
_snake_case = F"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
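    # scan the file line by line, tracking whether we are inside the target class, the target test, and the line to patch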
for line in lines:
if line.startswith(_snake_case ):
_snake_case = True
elif in_class and line.startswith(_snake_case ):
_snake_case = True
elif in_class and in_func and (line.startswith(_snake_case ) or line.startswith(_snake_case )):
_snake_case = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_snake_case = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_snake_case = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * ' '}{correct_line}""" )
_snake_case = False
else:
new_lines.append(_snake_case )
    with open(file , '''w''' ) as f:
        for line in new_lines:
            f.write(line )
def main(correct , fail=None ):
    """simple docstring"""
    if fail is not None:
        with open(fail , '''r''' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , '''r''' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(''';''' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
__A = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| code_codestyle: 352 |
from __future__ import annotations
def kmp(pattern , text ) -> bool:
    """Knuth-Morris-Pratt search: returns True if `pattern` occurs anywhere in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern )
# 2) Step through text searching for pattern
    i, j = 0, 0 # index into text, pattern
    while i < len(text ):
if pattern[j] == text[i]:
if j == (len(_UpperCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
            j = failure[j - 1]
continue
i += 1
return False
def get_failure_array(pattern ) -> list[int]:
    """Builds the KMP failure table: failure[i] is the length of the longest proper prefix of pattern[: i + 1] that is also its suffix."""
    failure = [0]
    i = 0
    j = 1
while j < len(_UpperCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
        i = failure[i - 1]
continue
j += 1
    failure.append(i )
return failure
if __name__ == "__main__":
# Test 1)
__A = '''abc1abc12'''
__A = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__A = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__A = '''ABABX'''
__A = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__A = '''AAAB'''
__A = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__A = '''abcdabcy'''
__A = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__A = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| style_context_codestyle: 278 | label: 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : List[Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
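# Each try/except below registers the objects of an optional backend (sentencepiece, tokenizers, torch) only when it is installed.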
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| code_codestyle: 274 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program , timeout , task_id , completion_id ):
    """Evaluates the functional correctness of a completion by running the provided test suite in a separate process."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
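    # if the child outlived its one-second grace period past the nominal timeout, kill it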
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute(check_program , result , timeout ):
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
result.append("""passed""" )
except TimeoutException:
result.append("""timed out""" )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
A__ = rmtree
A__ = rmdir
A__ = chdir
@contextlib.contextmanager
def time_limit(seconds ):
    def signal_handler(signum , frame ):
        raise TimeoutException("""Timed out!""" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    """Redirects stdout, stderr and stdin of the wrapped block into a write-only buffer."""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
yield
@contextlib.contextmanager
def create_tempdir():
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
yield dirname
class TimeoutException(Exception ):
    '''Raised when the executed program exceeds its time limit.'''
    pass
class WriteOnlyStringIO(io.StringIO ):
    '''StringIO that throws an exception when it's read from.'''
    def read( self , *args , **kwargs ):
        raise OSError

    def readline( self , *args , **kwargs ):
        raise OSError

    def readlines( self , *args , **kwargs ):
        raise OSError

    def readable( self , *args , **kwargs ):
        """Returns True if the IO object can be read."""
        return False
class redirect_stdin(contextlib._RedirectStream ): # type: ignore
    _stream = '''stdin'''
@contextlib.contextmanager
def chdir(root ):
"""simple docstring"""
if root == ".":
yield
return
    cwd = os.getcwd()
    os.chdir(root )
try:
yield
except BaseException as exc:
raise exc
finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None ):
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
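        # cap the address space and data segment (and, off macOS, the stack) of the current process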
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
    # Null out destructive builtins/os/shutil/subprocess hooks, per the upstream OpenAI execution.py.
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["""OMP_NUM_THREADS"""] = """1"""
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None # type: ignore
    __builtins__["""help"""] = None
    import sys
    sys.modules["""ipdb"""] = None
    sys.modules["""joblib"""] = None
    sys.modules["""resource"""] = None
    sys.modules["""psutil"""] = None
    sys.modules["""tkinter"""] = None
| style_context_codestyle: 274 | label: 1 |
"""simple docstring"""
def nor_gate(input_1 : int , input_2 : int ) -> int:
    return int(input_1 == input_2 == 0 )
def _snake_case ( ):
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| code_codestyle: 362 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path : str ) -> str:
    result = """"""
    try:
        with open(file_path , """rb""" ) as binary_file:
            data = binary_file.read()
for dat in data:
UpperCAmelCase : List[Any] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
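# LZW-style decompression: the dictionary starts with the single-bit phrases and doubles (gaining a leading-zero bit) whenever its size reaches a power of two.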
def decompress_data(data_bits : str ) -> str:
    lexicon = {"""0""": """0""", """1""": """1"""}
    result, curr_string = """""", """"""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + """0"""
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["""0""" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + """1"""
        index += 1
        curr_string = """"""
    return result
def write_file_binary(file_path : str , to_write : str ) -> None:
    byte_length = 8
    try:
        with open(file_path , """wb""" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def remove_prefix(data_bits : str ) -> str:
    counter = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
return data_bits
def compress(source_path : str , destination_path : str ) -> None:
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| style_context_codestyle: 76 | label: 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase ):
def __init__( self : Union[str, Any] , a : Optional[int] , a : str=13 , a : str=7 , a : Optional[Any]=True , a : List[Any]=True , a : str=True , a : str=True , a : Dict=99 , a : Dict=32 , a : Union[str, Any]=5 , a : Tuple=4 , a : List[str]=37 , a : Optional[int]="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : Any=5_12 , a : int=16 , a : List[Any]=2 , a : List[str]=0.02 , a : Any=4 , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = True
__lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 67 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : int = tau * frequency / samplerate
UpperCAmelCase_ : List[str] = sin(__lowerCamelCase )
UpperCAmelCase_ : int = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : int = (1 - _cos) / 2
UpperCAmelCase_ : Optional[Any] = 1 - _cos
UpperCAmelCase_ : int = 1 + alpha
UpperCAmelCase_ : Dict = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha
UpperCAmelCase_ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Dict = tau * frequency / samplerate
UpperCAmelCase_ : Tuple = sin(__lowerCamelCase )
UpperCAmelCase_ : Any = cos(__lowerCamelCase )
UpperCAmelCase_ : List[str] = _sin / (2 * q_factor)
UpperCAmelCase_ : List[Any] = (1 + _cos) / 2
UpperCAmelCase_ : Optional[int] = -1 - _cos
UpperCAmelCase_ : Union[str, Any] = 1 + alpha
UpperCAmelCase_ : Optional[int] = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha
UpperCAmelCase_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Union[str, Any] = tau * frequency / samplerate
UpperCAmelCase_ : str = sin(__lowerCamelCase )
UpperCAmelCase_ : Tuple = cos(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Any = _sin / 2
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Tuple = -ba
UpperCAmelCase_ : Optional[Any] = 1 + alpha
UpperCAmelCase_ : Dict = -2 * _cos
UpperCAmelCase_ : Optional[int] = 1 - alpha
UpperCAmelCase_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ) ):
UpperCAmelCase_ : Any = tau * frequency / samplerate
UpperCAmelCase_ : Any = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = cos(__lowerCamelCase )
UpperCAmelCase_ : str = _sin / (2 * q_factor)
UpperCAmelCase_ : List[str] = 1 - alpha
UpperCAmelCase_ : str = -2 * _cos
UpperCAmelCase_ : Any = 1 + alpha
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : Dict = tau * frequency / samplerate
UpperCAmelCase_ : Union[str, Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : int = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[int] = _sin / (2 * q_factor)
UpperCAmelCase_ : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase_ : List[Any] = 1 + alpha * big_a
UpperCAmelCase_ : Tuple = -2 * _cos
UpperCAmelCase_ : Tuple = 1 - alpha * big_a
UpperCAmelCase_ : str = 1 + alpha / big_a
UpperCAmelCase_ : List[str] = -2 * _cos
UpperCAmelCase_ : List[str] = 1 - alpha / big_a
UpperCAmelCase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : str = tau * frequency / samplerate
UpperCAmelCase_ : int = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = cos(__lowerCamelCase )
UpperCAmelCase_ : Tuple = _sin / (2 * q_factor)
UpperCAmelCase_ : List[Any] = 10 ** (gain_db / 40)
UpperCAmelCase_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : int = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : Optional[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : Optional[int] = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : Dict = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : List[str] = big_a * (pmc + aaa)
UpperCAmelCase_ : int = 2 * big_a * mpc
UpperCAmelCase_ : int = big_a * (pmc - aaa)
UpperCAmelCase_ : Dict = ppmc + aaa
UpperCAmelCase_ : Any = -2 * pmpc
UpperCAmelCase_ : List[str] = ppmc - aaa
UpperCAmelCase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 1 / sqrt(2 ), ):
UpperCAmelCase_ : int = tau * frequency / samplerate
UpperCAmelCase_ : Optional[Any] = sin(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = cos(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase_ : Tuple = 10 ** (gain_db / 40)
UpperCAmelCase_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase_ : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase_ : List[Any] = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase_ : Any = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase_ : Dict = 2 * sqrt(__lowerCamelCase ) * alpha
UpperCAmelCase_ : Any = big_a * (ppmc + aaa)
UpperCAmelCase_ : Union[str, Any] = -2 * big_a * pmpc
UpperCAmelCase_ : Dict = big_a * (ppmc - aaa)
UpperCAmelCase_ : Optional[int] = pmc + aaa
UpperCAmelCase_ : Union[str, Any] = 2 * mpc
UpperCAmelCase_ : int = pmc - aaa
UpperCAmelCase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa], [ba, ba, ba] )
return filt
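
# Usage sketch (not part of the original module): build a 1 kHz low-pass biquad for
# 48 kHz audio and run a few samples through it. This assumes IIRFilter exposes a
# per-sample process() method, as the audio_filters.iir_filter module imported above does.
if __name__ == "__main__":
    lowpass = make_lowpass(1000, 48000)
    print([round(lowpass.process(sample), 4) for sample in (0.0, 1.0, 0.5, -0.5, -1.0)])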
| 61 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 360 |
"""simple docstring"""
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = [0] * size
SCREAMING_SNAKE_CASE = [0] * size
@staticmethod
def __A ( lowerCAmelCase__ ) -> int:
return index | (index + 1)
@staticmethod
def __A ( lowerCAmelCase__ ) -> int:
return (index & (index + 1)) - 1
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = value
while index < self.size:
SCREAMING_SNAKE_CASE = self.get_prev(lowerCAmelCase__ ) + 1
if current_left_border == index:
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.get_next(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
right -= 1 # Because of right is exclusive
SCREAMING_SNAKE_CASE = 0
while left <= right:
SCREAMING_SNAKE_CASE = self.get_prev(lowerCAmelCase__ )
if left <= current_left:
SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , self.tree[right] )
SCREAMING_SNAKE_CASE = current_left
else:
SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
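
# Usage sketch (not part of the original module): point updates followed by
# range-maximum queries; the right bound of query() is exclusive.
if __name__ == "__main__":
    ft = MaxFenwickTree(5)
    ft.update(1, 20)
    ft.update(4, 10)
    assert ft.query(0, 5) == 20  # max over indices [0, 5)
    assert ft.query(2, 4) == 0   # indices 2 and 3 were never set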
| 38 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 249 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None, ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length" ), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
| 249 | 1 |
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 352 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
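
# Usage sketch (not part of the original module): instantiate the config with
# Gym-Hopper-sized inputs; the keyword names mirror the fields set in __init__ above.
if __name__ == "__main__":
    demo_config = DecisionTransformerConfig(state_dim=11, act_dim=3, n_layer=4)
    print(demo_config.model_type, demo_config.n_layer)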
| 46 | 0 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 330 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 330 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 351 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
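
# Usage sketch (not part of the original module): custom_unfold mirrors
# torch.Tensor.unfold so the op can be exported to ONNX; a length-6 vector with
# size=2 and step=2 yields three two-element windows.
if __name__ == "__main__":
    import torch

    _x = torch.arange(6)
    assert torch.equal(custom_unfold(_x, 0, 2, 2), _x.unfold(0, 2, 2))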
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of GPT-Neo's block-size computation to enable ONNX export."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 295 | 0 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal value to its binary equivalent, returned as an int of 0/1 digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
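
# Usage sketch (not part of the original module): the binary digits come back
# packed into a (possibly negative) int.
if __name__ == "__main__":
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fb1") == -111110110001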
| 37 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
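
# Usage sketch (not part of the original module): this pipeline is normally built
# through the `pipeline` factory with a CLIP-style checkpoint; the model name and
# image path below are only examples.
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])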
| 37 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__magic_name__ = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
__magic_name__ = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
__magic_name__ = "|".join(sys.argv[1:])
__magic_name__ = re.compile(rf'''^({joined_dirs}).*?\.py$''')
__magic_name__ = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 152 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 152 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
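
# Example (sketch, not part of the original module): build a small MvpConfig;
# arguments left unset keep the defaults above.
if __name__ == "__main__":
    demo_config = MvpConfig(vocab_size=1000, d_model=64, encoder_layers=2, decoder_layers=2)
    print(demo_config.model_type, demo_config.hidden_size)  # hidden_size maps to d_model via attribute_map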
| 27 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
__a : int = int(number**0.5 )
return number == sq * sq
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
__a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__a : int = x_den * y_den * z_den
__a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
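# Note: the four nested loops make solution(order) roughly O(order**4);
# the default order=35 is about 1.5 million iterations.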
| 27 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
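
# Example usage (a sketch; loading "unc-nlp/lxmert-base-uncased" needs network access):
# tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# tokenizer("How many cats are there?")["input_ids"]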
| 368 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute gamma(num) by numerically integrating the defining integral."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
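
    # Sanity check (illustrative): gamma(n) == (n - 1)! for positive integers,
    # so this should print approximately 24.0.
    print(gamma(5.0))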
| 71 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            # Placeholder so the module imports even when vision deps are missing.
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 302 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
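# Note (illustrative): torch.float16 weights need a CUDA device; on CPU-only
# machines, drop torch_dtype=torch.float16 and the .to("cuda") call.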
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png") | 206 | 0 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = [1, 3, 0, 5, 8, 5]
A_ : Optional[Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
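    # For the sample inputs above, the output is:
    # The following activities are selected:
    # 0,1,3,4,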
| 316 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
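
# Minimal usage sketch (illustrative; slots=True dataclasses need Python 3.10+):
if __name__ == "__main__":
    hash_map: HashMap[str, int] = HashMap()
    hash_map["a"] = 1
    hash_map["b"] = 2
    del hash_map["a"]
    print(hash_map, len(hash_map))  # HashMap(b: 2) 1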
| 316 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
} | 97 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
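
    # Usage sketch (illustrative): point updates and prefix sums.
    f = FenwickTree(arr=[1, 2, 3, 4])
    f.add(0, 5)           # the represented array is now [6, 2, 3, 4]
    print(f.prefix(3))    # 11 == 6 + 2 + 3
    print(f.query(1, 3))  # 5  == 2 + 3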
| 131 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
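
    # Example (illustrative): (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3,
    # so FFT([1, 2, 3], [4, 5]).product is approximately [4, 13, 22, 15].
    print(FFT([1, 2, 3], [4, 5]))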
| 335 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 335 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 287 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass | 187 |
"""A trie (prefix tree) supporting insert, find and delete of words."""


class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
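
# Added example (not in the original file): a minimal autocomplete sketch built
# on the TrieNode class above; the helper name `words_with_prefix` is an
# illustrative assumption, not part of the original API.
def words_with_prefix(root: TrieNode, prefix: str) -> list:
    # walk down to the node that ends the prefix
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    # collect every complete word stored below that node
    results = []

    def _collect(node, word) -> None:
        if node.is_leaf:
            results.append(word)
        for key, child in node.nodes.items():
            _collect(child, word + key)

    _collect(curr, prefix)
    return results


# Usage: words_with_prefix(root, "ban") -> ['banana', 'bananas', 'band', 'bandana']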
def print_words(node: TrieNode, word: str) -> None:
    """Print every word stored in the trie, depth first."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def test() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main() | 187 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
'''simple docstring'''
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , '''num_encoder_blocks''' ) )
class SegformerModelTester :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Union[str, Any]=6_4 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : List[Any]=[2, 2, 2, 2] , lowerCAmelCase_ : Tuple=[8, 4, 2, 1] , lowerCAmelCase_ : Any=[1_6, 3_2, 6_4, 1_2_8] , lowerCAmelCase_ : str=[1, 4, 8, 1_6] , lowerCAmelCase_ : Any=[1, 2, 4, 8] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : str=None , ):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: Tuple = batch_size
_A: Union[str, Any] = image_size
_A: Union[str, Any] = num_channels
_A: int = num_encoder_blocks
_A: int = sr_ratios
_A: List[Any] = depths
_A: str = hidden_sizes
_A: int = downsampling_rates
_A: Optional[int] = num_attention_heads
_A: Dict = is_training
_A: Union[str, Any] = use_labels
_A: List[Any] = hidden_act
_A: List[str] = hidden_dropout_prob
_A: int = attention_probs_dropout_prob
_A: Union[str, Any] = initializer_range
_A: List[Any] = num_labels
_A: str = scope
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: List[Any] = None
if self.use_labels:
_A: Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A: Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : str ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Optional[int] = SegformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Any = model(lowerCAmelCase_ )
_A: List[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: int = self.num_labels
_A: Union[str, Any] = SegformerForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Union[str, Any] = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_A: Tuple = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Dict = 1
_A: List[Any] = SegformerForSemanticSegmentation(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase_ )
_A: str = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertGreater(result.loss , 0.0 )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: int = self.prepare_config_and_inputs()
_A , _A , _A: Dict = config_and_inputs
_A: List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Union[str, Any] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Optional[Any] = False
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: str = SegformerModelTester(self )
_A: int = SegformerConfigTester(self , config_class=lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase_ )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def __magic_name__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def __magic_name__ ( self : int ):
"""simple docstring"""
pass
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A , _A: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: str = model_class(lowerCAmelCase_ )
_A: Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: str = [*signature.parameters.keys()]
_A: int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A , _A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A: Optional[int] = True
for model_class in self.all_model_classes:
_A: Union[str, Any] = True
_A: Optional[int] = False
_A: Any = True
_A: int = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A: Dict = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: Any = outputs.attentions
_A: Tuple = sum(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A: List[Any] = True
_A: List[Any] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A: Optional[int] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: Dict = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# verify the first attentions (first block, first layer)
_A: Union[str, Any] = (self.model_tester.image_size // 4) ** 2
_A: List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_A: int = (self.model_tester.image_size // 3_2) ** 2
_A: Union[str, Any] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_A: Tuple = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_A: Union[str, Any] = True
_A: Tuple = True
_A: Union[str, Any] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A: Optional[int] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_A: List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# verify the first attentions (first block, first layer)
_A: str = (self.model_tester.image_size // 4) ** 2
_A: List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __magic_name__ ( self : int ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ):
_A: Dict = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A: str = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A: List[Any] = outputs.hidden_states
_A: Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_A , _A: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Any = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A: Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_A , _A: int = self.model_tester.prepare_config_and_inputs_for_common()
_A: str = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING ):
continue
_A: List[str] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_A: Optional[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A: Any = model(**lowerCAmelCase_ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : int ):
"""simple docstring"""
pass
@slow
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A: Any = SegformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase__ ( ) -> Dict:
_A: List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
# only resize + normalize
_A: List[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase_ , align=lowerCAmelCase_ , do_random_crop=lowerCAmelCase_ )
_A: Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
lowerCAmelCase_ )
_A: Tuple = prepare_img()
_A: List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' )
_A: Any = encoded_inputs.pixel_values.to(lowerCAmelCase_ )
with torch.no_grad():
_A: str = model(lowerCAmelCase_ )
_A: Optional[Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A: str = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : int ):
"""simple docstring"""
# only resize + normalize
_A: List[str] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase_ , align=lowerCAmelCase_ , do_random_crop=lowerCAmelCase_ )
_A: Any = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(lowerCAmelCase_ )
_A: Tuple = prepare_img()
_A: Optional[int] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' )
_A: Union[str, Any] = encoded_inputs.pixel_values.to(lowerCAmelCase_ )
with torch.no_grad():
_A: List[Any] = model(lowerCAmelCase_ )
_A: Union[str, Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A: Any = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1e-1 ) )
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
# only resize + normalize
_A: List[str] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase_ , align=lowerCAmelCase_ , do_random_crop=lowerCAmelCase_ )
_A: Optional[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
lowerCAmelCase_ )
_A: Dict = prepare_img()
_A: Dict = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' )
_A: Optional[int] = encoded_inputs.pixel_values.to(lowerCAmelCase_ )
with torch.no_grad():
_A: List[Any] = model(lowerCAmelCase_ )
_A: Optional[Any] = outputs.logits.detach().cpu()
_A: Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ , target_sizes=[(5_0_0, 3_0_0)] )
_A: Any = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
_A: str = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ )
_A: str = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
| 121 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image( pil_image , w=512 , h=512 ):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('''RGB''' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
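
# Added illustration (not in the original file): with the default scale_factor=8,
# sizes are divided by 8**2 = 64, rounded up, then multiplied back by 8, e.g.
#   downscale_height_and_width(768, 768) -> (96, 96)
#   downscale_height_and_width(500, 300) -> (64, 40)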
class KandinskyV22Img2ImgPipeline ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , )
_A: List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
# get the original timestep using init_timestep
_A: Union[str, Any] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
_A: str = max(num_inference_steps - init_timestep , 0 )
_A: str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}""" )
_A: Optional[int] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_A: Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A: Optional[int] = image
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
_A: Optional[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
else:
_A: Optional[int] = self.movq.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
_A: int = self.movq.config.scaling_factor * init_latents
_A: Optional[Any] = torch.cat([init_latents] , dim=0 )
_A: Any = init_latents.shape
_A: Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
_A: Union[str, Any] = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[str] = init_latents
return latents
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
_A: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A: List[Any] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ )
# We'll offload the last model manually.
_A: Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Any = self._execution_device
_A: Any = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Any = torch.cat(lowerCAmelCase_ , dim=0 )
_A: int = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = torch.cat(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[str] = [image]
if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 )
_A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ )
_A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents''']
_A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A: Optional[int] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor )
_A: Any = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_A: Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A: str = {'''image_embeds''': image_embeds}
_A: Optional[int] = self.unet(
sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
if do_classifier_free_guidance:
_A , _A: str = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A: int = noise_pred.chunk(2 )
_A , _A: int = variance_pred.chunk(2 )
_A: Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A: Any = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
# post-processing
_A: Tuple = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_A: int = image * 0.5 + 0.5
_A: Any = image.clamp(0 , 1 )
_A: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A: Union[str, Any] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 121 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __A ( a_ :int , a_ :int , a_ :bool , a_ :list[int] , a_ :float) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''')
if not scores:
raise ValueError('''Scores cannot be empty''')
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , snake_case_ , snake_case_ , snake_case_) , minimax(depth + 1 , node_index * 2 + 1 , snake_case_ , snake_case_ , snake_case_) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , snake_case_ , snake_case_ , snake_case_) , minimax(depth + 1 , node_index * 2 + 1 , snake_case_ , snake_case_ , snake_case_) , )
)
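
# Added variant (not in the original file): the same search with alpha-beta
# pruning; `minimax_alpha_beta` is an illustrative addition that returns the
# same value while skipping subtrees that cannot change the result.
def minimax_alpha_beta(depth: int, node_index: int, is_max: bool, scores: list[int], height: float, alpha: float = -math.inf, beta: float = math.inf) -> float:
    if depth == height:
        return scores[node_index]
    if is_max:
        best = -math.inf
        for child in (node_index * 2, node_index * 2 + 1):
            best = max(best, minimax_alpha_beta(depth + 1, child, False, scores, height, alpha, beta))
            alpha = max(alpha, best)
            if beta <= alpha:  # the min player already has a better option elsewhere
                break
        return best
    best = math.inf
    for child in (node_index * 2, node_index * 2 + 1):
        best = min(best, minimax_alpha_beta(depth + 1, child, True, scores, height, alpha, beta))
        beta = min(beta, best)
        if beta <= alpha:
            break
    return best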
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main() | 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
    '''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
        '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MvpForCausalLM''',
        '''MvpForConditionalGeneration''',
        '''MvpForQuestionAnswering''',
        '''MvpForSequenceClassification''',
        '''MvpModel''',
        '''MvpPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 188 | 0 |
'''Sum of the first n terms of an arithmetic progression.'''
def sum_of_series( first_term: int , common_diff: int , num_of_terms: int ) -> float:
    '''
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    '''
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
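
# Added check (not in the original file): the closed form agrees with a direct
# summation of the progression, e.g. for first_term=1, common_diff=1, n=10:
#   sum(1 + i * 1 for i in range(10)) == 55
#   sum_of_series(1, 1, 10) == 55.0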
def main():
    print(sum_of_series(1, 1, 10) )


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 152 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest ( SchedulerCommonTest ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 322 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : List[str] = len(lowercase__ )
_lowerCamelCase : List[str] = max(lowercase__ )
_lowerCamelCase : List[str] = min(lowercase__ )
# create the counting array
_lowerCamelCase : List[Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , lowercase__ ):
_lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , lowercase__ ) ):
_lowerCamelCase : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
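
# Added note (not in the original file): offsetting indices by the collection
# minimum means negative keys work unchanged, e.g.
#   counting_sort([-3, 7, 0, -3, 2]) -> [-3, -3, 0, 2, 7]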
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted)) | 365 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ = logging.get_logger(__name__)
class BarkProcessor ( ProcessorMixin ):
    '''Processor for Bark: wraps a tokenizer together with optional speaker-embedding presets.'''

    tokenizer_class = """AutoTokenizer"""
    attributes = ["""tokenizer"""]
    preset_shape = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }

    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
@classmethod
def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
if speaker_embeddings_dict_path is not None:
_lowerCamelCase : Optional[Any] = get_file_from_repo(
lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , )
if speaker_embeddings_path is None:
logger.warning(
F'''`{os.path.join(lowercase , lowercase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_lowerCamelCase : List[Any] = None
else:
with open(lowercase ) as speaker_embeddings_json:
_lowerCamelCase : Union[str, Any] = json.load(lowercase )
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase )
return cls(tokenizer=lowercase , speaker_embeddings=lowercase )
def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
_lowerCamelCase : int = {}
_lowerCamelCase : List[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase )
_lowerCamelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
_lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
_lowerCamelCase : Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
def A_ ( self , lowercase = None , **lowercase ):
_lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset]
_lowerCamelCase : Any = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_lowerCamelCase : Union[str, Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , )
if path is None:
raise ValueError(
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
_lowerCamelCase : List[str] = np.load(lowercase )
return voice_preset_dict
def A_ ( self , lowercase = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
if voice_preset is not None and not isinstance(lowercase , lowercase ):
if (
isinstance(lowercase , lowercase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCamelCase : Any = self._load_voice_preset(lowercase )
else:
if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ):
_lowerCamelCase : Optional[Any] = voice_preset + '.npz'
_lowerCamelCase : Union[str, Any] = np.load(lowercase )
if voice_preset is not None:
self._validate_voice_preset_dict(lowercase , **lowercase )
_lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase )
_lowerCamelCase : Any = self.tokenizer(
lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
if voice_preset is not None:
_lowerCamelCase : Optional[int] = voice_preset
return encoded_text | 12 | 0 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
    """Placeholder object that raises an informative error when torch/scipy are missing."""

    _backends = ['''torch''', '''scipy''']
def __init__( self : int , *_A : str , **_A : Any ):
"""simple docstring"""
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *_A : int , **_A : Any ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCAmelCase__ ( cls : int , *_A : Tuple , **_A : Union[str, Any] ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] )
| 303 |
"""Lempel-Ziv-Welch (LZW) compression over a bit string read from a file."""
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds new strings (curr_string + "0", curr_string + "1") to the lexicon
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using the LZW algorithm and returns the result
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds the source file's length in front (using Elias gamma coding) of the compressed string
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the result in the destination file
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
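
# Added usage sketch (not in the original script): `_demo_roundtrip` is a
# hypothetical helper showing the pipeline on a throwaway file; it is not part
# of the CLI entry point below.
def _demo_roundtrip() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as src:
        src.write(b"abababababababab")
    compress(src.name, src.name + ".lzw")
    print(os.path.getsize(src.name), "->", os.path.getsize(src.name + ".lzw"))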
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 303 | 1 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Stores two signals and performs their circular convolution."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list:
        """
        Performs the circular convolution of the first and second signal using
        the matrix method.

        Example: CircularConvolution().circular_convolution() -> [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row holds the second signal rotated right by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
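
# Added cross-check (not in the original file): circular convolution equals
# pointwise multiplication in the frequency domain, so an FFT round trip can
# validate the matrix method above; `fft_circular_convolution` is added here
# purely for illustration.
def fft_circular_convolution(first, second):
    # both signals must already be zero-padded to the same length
    spectrum = np.fft.fft(first) * np.fft.fft(second)
    return np.real(np.fft.ifft(spectrum)).round(2).tolist()


# Usage: fft_circular_convolution([2, 1, 2, -1], [1, 2, 3, 4]) -> [10.0, 10.0, 6.0, 14.0]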
if __name__ == "__main__":
doctest.testmod()
| 45 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel ( pl.LightningModule ):
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    # implemented only because Lightning requires a forward method
    def forward( self ):
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )

    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 45 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
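# Minimal sketch of what the shim above means in practice (assumes a recent
# transformers install with OWL-ViT support):
#   OwlViTFeatureExtractor()   # still works, but emits the FutureWarning above
#   OwlViTImageProcessor()     # the drop-in replacement to migrate to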
| 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list with a single random PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
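# Outside the test suite the processor is driven the same way; a sketch only,
# where "clip-italian/clip-italian" is just an example checkpoint id:
#   processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")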
| 0 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # the scripted beam search below does not model these generation constraints
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
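# Illustrative invocation (only facebook/bart-base is wired into model_dict above;
# the output path is an example):
#   python <this_script>.py --model_name_or_path facebook/bart-base \
#       --output_file_path bart.onnx --device cpu --max_length 5 --num_beams 4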
| 142 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
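# For reference, the series that pi() sums (Chudnovsky, 1988):
#
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134*k)
#                          / ((3k)! * (k!)^3 * 640320^(3k + 3/2))
#
# constant_term = 426880*sqrt(10005) equals 640320^(3/2)/12, and each term adds
# roughly 14 correct digits, which is why num_iterations = ceil(precision / 14).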
if __name__ == "__main__":
    n = 50
print(F'The first {n} digits of pi is: {pi(n)}')
| 142 | 1 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the text of the third result link, which is usually "Cited by ..."."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
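# Caveat (not enforced by the code): this scrapes Google Scholar's HTML, so the
# CSS class names "gs_ri"/"gs_fl" and the anchor index 2 are empirical and can
# break whenever the page layout changes.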
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 20 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer: every UTF-8 byte is a token, preceded in the vocab by pad/eos/unk."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=1_25,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens: one single-character token per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string via raw bytes."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
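# Byte-level round trip, for intuition (a sketch; the ids assume the default
# specials pad=0, </s>=1, <unk>=2, so every byte is offset by 3):
#   tok = ByT5Tokenizer()
#   tok("hi")["input_ids"]     # -> [107, 108, 1]  ('h'=104+3, 'i'=105+3, </s>)
#   tok.decode([107, 108, 1])  # -> "hi</s>"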
| 226 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : List[str] = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : List[Any] = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__lowerCAmelCase , features=__lowerCAmelCase ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE__ : Tuple = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : str = pa.ipc.open_stream(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : pa.Table = f.read_all()
SCREAMING_SNAKE_CASE__ : Any = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__lowerCAmelCase )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt="""split_name""" , check_duplicates=__lowerCAmelCase , ) as writer:
with pytest.raises(__lowerCAmelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt="""split_name""" , check_duplicates=__lowerCAmelCase , ) as writer:
with pytest.raises(__lowerCAmelCase ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def _lowercase ( __lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int = pa.BufferOutputStream()
with ArrowWriter(
stream=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase , hash_salt="""split_name""" , check_duplicates=__lowerCAmelCase , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Dict = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : List[str] = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.schema(__lowerCAmelCase ) if fields else None
with ArrowWriter(stream=__lowerCAmelCase , schema=__lowerCAmelCase , writer_batch_size=__lowerCAmelCase ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """test.arrow""" )
with ArrowWriter(path=__lowerCAmelCase , schema=pa.schema(__lowerCAmelCase ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__lowerCAmelCase , metadata=writer._schema.metadata )
_check_output(__lowerCAmelCase , 1 )
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
if pa.types.is_list(__lowerCAmelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if isinstance(lst[0] , __lowerCAmelCase ):
change_first_primitive_element_in_list(lst[0] , __lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[str] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__ : Any = pa.array(TypedSequence(__lowerCAmelCase , optimized_int_type=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
# in range
SCREAMING_SNAKE_CASE__ : List[str] = pa.array(OptimizedTypedSequence(__lowerCAmelCase , col=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE__ : int = copy.deepcopy(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.array(OptimizedTypedSequence(__lowerCAmelCase , col=__lowerCAmelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__lowerCAmelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = """mock://dataset-train.arrow"""
with ArrowWriter(path=__lowerCAmelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__lowerCAmelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__lowerCAmelCase )
def _lowercase ( ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.BufferOutputStream()
with ParquetWriter(stream=__lowerCAmelCase ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : pa.Table = pq.read_table(__lowerCAmelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
import PIL.Image
SCREAMING_SNAKE_CASE__ : List[str] = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowerCAmelCase , format="""png""" )
SCREAMING_SNAKE_CASE__ : str = pa.BufferOutputStream()
with ParquetWriter(
stream=__lowerCAmelCase , features=Features({"""image""": Image()} ) , embed_local_files=__lowerCAmelCase ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE__ : str = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : pa.Table = pq.read_table(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __lowerCAmelCase )
with open(__lowerCAmelCase , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowercase ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__lowerCAmelCase )] )
SCREAMING_SNAKE_CASE__ : List[str] = pa.BufferOutputStream()
with ArrowWriter(stream=__lowerCAmelCase ) as writer:
writer._build_writer(inferred_schema=__lowerCAmelCase )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
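# Distilled from the tests above, the minimal ArrowWriter life cycle looks like
# this (a sketch, not part of the suite):
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   pa_table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()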
| 56 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 56 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 112 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
                elif hasattr(old_model, attribute):
                    old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
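# Illustrative invocation (the checkpoint path is a placeholder):
#   python <this_script>.py \
#       --prophetnet_checkpoint_path ./prophetnet_large_pretrained \
#       --pytorch_dump_folder_path ./prophetnet_converted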
| 305 | 0 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
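# A hedged standalone usage sketch with a small 4x4 array instead of an image file;
# the values are arbitrary demo data:
if __name__ == "__main__":
    example = np.array([[32, 15, 2, 10], [12, 20, 25, 8], [10, 4, 3, 18], [12, 15, 17, 12]])
    print(maxpooling(example, size=2, stride=2))  # [[32. 25.] [15. 18.]]
    print(avgpooling(example, size=2, stride=2))  # [[19. 11.] [10. 12.]]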
| 369 | """simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
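# A hedged usage sketch of the pipeline under test; kept commented out because it
# downloads a checkpoint, and the exact generated text depends on the tiny model:
#
#     from transformers import pipeline
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     print(generator("Something there", do_sample=False))  # e.g. [{'generated_text': ''}]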
| 54 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # intentionally shadows the builtin `map` inside this benchmark module
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # intentionally shadows the builtin `filter` inside this benchmark module
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
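# `get_duration` comes from a local `utils` module that is not shown here. A hedged
# sketch of what such a timing decorator plausibly looks like (an assumption, not the
# actual implementation), kept commented out so it does not shadow the real import:
#
#     import functools
#     import time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start  # the benchmark stores this return value
#         return wrapper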
| 14 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
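# Quick sanity check for both variants; 21 is the gcd of 252 and 105, and both
# implementations satisfy the defining identity gcd(a, b) == gcd(b, a % b):
#
#     >>> euclidean_gcd(252, 105)
#     21
#     >>> euclidean_gcd_recursive(252, 105)
#     21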
| 327 | 0 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
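# Worked example: for [2, 3, -2, 4] the running (max_till_now, min_till_now) pairs
# are (2, 2) -> (6, 3) -> (-2, -12) -> (4, -48), so the maximum product is 6:
#
#     >>> max_product_subarray([2, 3, -2, 4])
#     6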
| 371 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, quantity: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(quantity))


def random_number(chars_incl, quantity):
    pass  # Put your code here...


def random_letters(chars_incl, quantity):
    pass  # Put your code here...


def random_characters(chars_incl, quantity):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main() -> None:
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
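# A hedged usage sketch; outputs are random by construction, so only structural
# properties are checked here:
#
#     >>> pw = password_generator(12)
#     >>> len(pw)
#     12
#     >>> isinstance(is_strong_password(pw), bool)
#     True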
| 271 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """spiece.model"""}
lowercase__ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
lowercase__ = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class GPTSwaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
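# A minimal standalone sketch of the preprocessing pipeline above (control-character
# stripping, then NFC normalization); the sample string and variable names are
# illustrative only, but the character ranges mirror the regex built in __init__:
if __name__ == "__main__":
    _demo_re = re.compile(
        f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
    )
    _sample = "a\x00b\u200bc"  # the NUL byte and zero-width space are both stripped
    _sample = _demo_re.sub("", _sample)
    _sample = unicodedata.normalize("NFC", _sample)
    print(_sample)  # -> 'abc'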
| 241 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self ) -> List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = FlaubertModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowercase , emb_dim=37 )
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
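# A hedged miniature of the torch.jit round-trip exercised in the device-change test
# above, using a toy linear layer instead of Flaubert; all names here are illustrative
# and the snippet assumes torch is installed:
if __name__ == "__main__":
    _toy = torch.nn.Linear(4, 2).eval()
    _example = torch.randn(1, 4)
    _traced = torch.jit.trace(_toy, _example)
    with tempfile.TemporaryDirectory() as _tmp:
        torch.jit.save(_traced, os.path.join(_tmp, "traced_model.pt"))
        _loaded = torch.jit.load(os.path.join(_tmp, "traced_model.pt"), map_location="cpu")
        assert torch.allclose(_loaded(_example), _toy(_example))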
| 46 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def a_ ( self ):
pass
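# A hedged sketch of greedy longest-match-first WordPiece, the scheme behind the
# ["un", "##want", "##ed"] expectation above; simplified (the real tokenizer also
# lowercases, splits punctuation, and caps input length), and all names are invented:
def _wordpiece_demo(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        if end == start:  # no subword matched
            return ["[UNK]"]
        start = end
    return pieces

if __name__ == "__main__":
    print(_wordpiece_demo("unwanted", {"un", "##want", "##ed"}))  # ['un', '##want', '##ed']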
| 27 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase ( nn.Module ):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
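# A hedged numeric sketch of the mixing step in `forward` above: each transformer's
# residual is blended by `mix_ratio` and the input is added back at the end. The
# shapes are illustrative only:
if __name__ == "__main__":
    import torch

    _input_states = torch.randn(1, 4, 8)
    _residuals = [torch.randn(1, 4, 8), torch.randn(1, 4, 8)]  # encoded_state - input_states
    _mix_ratio = 0.5
    _out = _residuals[0] * _mix_ratio + _residuals[1] * (1 - _mix_ratio)
    _out = _out + _input_states
    print(_out.shape)  # torch.Size([1, 4, 8])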
| 27 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : Any = 'x = 3'
_UpperCamelCase : Any = {}
_UpperCamelCase : Optional[Any] = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ ,{'x': 3} )
_UpperCamelCase : str = 'x = y'
_UpperCamelCase : str = {'y': 5}
_UpperCamelCase : Any = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 5, 'y': 5} )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Any = 'y = add_two(x)'
_UpperCamelCase : Tuple = {'x': 3}
_UpperCamelCase : List[Any] = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_UpperCamelCase : int = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = 'x = 3'
_UpperCamelCase : Optional[int] = {}
_UpperCamelCase : Any = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ ,{'x': 3} )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
_UpperCamelCase : List[str] = {'x': 3}
_UpperCamelCase : Optional[Any] = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : str = 'x = 3\ny = 5'
_UpperCamelCase : Dict = {}
_UpperCamelCase : Optional[int] = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 5} )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = 'text = f\'This is x: {x}.\''
_UpperCamelCase : Tuple = {'x': 3}
_UpperCamelCase : int = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'text': 'This is x: 3.'} )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
_UpperCamelCase : List[Any] = {'x': 3}
_UpperCamelCase : str = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 2} )
_UpperCamelCase : Optional[Any] = {'x': 8}
_UpperCamelCase : Optional[Any] = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 8, 'y': 5} )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : str = 'test_list = [x, add_two(x)]'
_UpperCamelCase : Any = {'x': 3}
_UpperCamelCase : int = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,[3, 5] )
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_list': [3, 5]} )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : str = 'y = x'
_UpperCamelCase : List[str] = {'x': 3}
_UpperCamelCase : Dict = evaluate(lowerCamelCase__ ,{} ,state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'y': 3} )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = 'test_list = [x, add_two(x)]\ntest_list[1]'
_UpperCamelCase : Any = {'x': 3}
_UpperCamelCase : int = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_list': [3, 5]} )
_UpperCamelCase : Any = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
_UpperCamelCase : Union[str, Any] = {'x': 3}
_UpperCamelCase : int = evaluate(lowerCamelCase__ ,{'add_two': add_two} ,state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = 'x = 0\nfor i in range(3):\n x = i'
_UpperCamelCase : str = {}
_UpperCamelCase : Union[str, Any] = evaluate(lowerCamelCase__ ,{'range': range} ,state=lowerCamelCase__ )
assert result == 2
self.assertDictEqual(lowerCamelCase__ ,{'x': 2, 'i': 2} )
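# A hedged toy version of the evaluator behaviour exercised above. The real
# `evaluate` interprets a full Python AST; this sketch (all names invented here)
# only handles a single `name = <literal>` assignment, purely for illustration:
import ast

def _tiny_evaluate(code, state):
    node = ast.parse(code, mode="exec").body[0]
    assert isinstance(node, ast.Assign)
    value = ast.literal_eval(node.value)
    state[node.targets[0].id] = value
    return value  # like evaluate(), return the value of the last assignment

if __name__ == "__main__":
    _state = {}
    assert _tiny_evaluate("x = 3", _state) == 3 and _state == {"x": 3}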
| 83 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase : Tuple = feat_extract(speech_inputs[0] ,return_tensors='np' ).input_values
_UpperCamelCase : Any = feat_extract(np_speech_inputs[0] ,return_tensors='np' ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test batched
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : Optional[int] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCamelCase : str = np.asarray(lowerCamelCase__ )
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : int = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='np' )
_UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[str] = range(800 ,1400 ,200 )
_UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
_UpperCamelCase : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding=lowerCamelCase__ )
_UpperCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Union[str, Any] = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='max_length' ,return_tensors='np' )
_UpperCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : int = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Any = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=2000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
import torch
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
_UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase : Optional[int] = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCamelCase : Tuple = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCamelCase : Optional[int] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == 'layer' )
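# A hedged sketch of the zero-mean/unit-variance property `_check_zero_mean_unit_variance`
# asserts above; Wav2Vec2's `do_normalize` standardises each sequence roughly like this
# (the epsilon is an assumption for numerical safety, not the extractor's exact code):
if __name__ == "__main__":
    _raw = np.random.rand(800).astype(np.float32)
    _norm = (_raw - _raw.mean()) / np.sqrt(_raw.var() + 1e-7)
    assert abs(float(_norm.mean())) < 1e-3
    assert abs(float(_norm.var()) - 1) < 1e-3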
| 83 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 169 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is the sum of a process's burst time and its waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
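# Worked example for the test case below: all four jobs arrive at t=0 with bursts
# [2, 5, 3, 7], so they run shortest-first in the order PID 1, 3, 2, 4, giving
# waiting_time = [0, 5, 2, 10] and turn_around_time = [2, 10, 5, 17].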
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 169 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't.
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) | 6 |
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series 1, 1/2, 1/3, ... as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else '1')
    return series
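# e.g. harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']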
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 216 | 0 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for num_epochs and returns the random numbers drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model that computes y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
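# With the defaults above, dummy_dataloaders() yields (x, 2 * x + 3 + noise) pairs,
# so a trained DummyModel should end up with `a` close to 2 and `b` close to 3.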
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
A : List[Any] = "/tmp/accelerate/state_checkpointing"
A : Any = DummyModel()
A : List[str] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
A : Tuple = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A : Dict = dummy_dataloaders()
A : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A : Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A : int = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A : List[Any] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
A : Any = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
A : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
A : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 359 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 259 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    """Configuration class for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 140 | import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 140 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask (white where gray > 127)."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
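# Example: a lone white pixel dilated by a 3x3 structuring element grows into a 3x3
# white block, e.g. dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), np.ones((3, 3))).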
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("""result_dilation.png""")
| 91 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 91 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    """Configuration class for ALBERT models."""

    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
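# Note: the defaults above roughly describe the albert-xxlarge architecture
# (hidden_size=4096, num_attention_heads=64, intermediate_size=16384); passing e.g.
# AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# would instead describe an albert-base-sized model.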
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 78 |
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of a number (0 for 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
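# e.g. get_highest_set_bit_position(8) == 4, since 8 is 0b1000.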
if __name__ == "__main__":
import doctest
doctest.testmod()
| 255 | 0 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
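# e.g. prime_sieve(10) -> [2, 3, 5, 7]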
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip()))) | 306 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 306 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
snake_case_ : Union[str, Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
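    # _LazyModule swaps itself in for this module, so the heavy torch/tokenizers imports
    # above only run the first time one of the listed attributes is actually accessed.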
| 51 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
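# e.g. arc_length(90, 10) -> 15.707963267948966 (a quarter of a circle of radius 10)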
if __name__ == "__main__":
print(arc_length(90, 10))
| 83 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGLUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Computes per-question macro-F1, answer-level F1, and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='macro')
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), codebase_urls=[], reference_urls=[], format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None, )
    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 371 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 84 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
def snake_case_ ( self):
        dummy_constant = create_dummy_object("""CONSTANT""" , """'torch'""")
        self.assertEqual(dummy_constant , """\nCONSTANT = None\n""")
        dummy_function = create_dummy_object("""function""" , """'torch'""")
        self.assertEqual(
            dummy_function , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""")
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""" , """'torch'""")
        self.assertEqual(dummy_class , expected_dummy_class)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]})
        self.assertEqual(dummy_files["""torch"""] , expected_dummy_pytorch_file)
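        # Note (a sketch of the surrounding workflow, not exercised by this test):
        # the dummy modules checked above are regenerated in the repo with
        # `make fix-copies`, which runs `python utils/check_dummies.py --fix_and_overwrite`;
        # the test only validates the generator's in-memory output.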
| 100 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
    def lowercase ( self : List[Any] , tokenizer : Any ) -> Dict:
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
def lowercase ( self : Dict ) -> Tuple:
        tokenizer = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
            tokenizer = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
        tokenizer = MecabTokenizer(do_lower_case=True , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
            tokenizer = MecabTokenizer(
                do_lower_case=True , normalize_text=False , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
        tokenizer = MecabTokenizer(normalize_text=False , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
        tokenizer = SudachiTokenizer(do_lower_case=True , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
        tokenizer = SudachiTokenizer(normalize_text=False , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
        tokenizer = SudachiTokenizer(trim_whitespace=True , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
        tokenizer = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
        self.assertIsNotNone(tokenizer )
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        filename = os.path.join(self.tmpdirname , 'tokenizer.bin' )
        with open(filename , 'wb' ) as handle:
            pickle.dump(tokenizer , handle )
        with open(filename , 'rb' ) as handle:
            tokenizer_new = pickle.load(handle )
        tokens_loaded = tokenizer_new.tokenize(text )
        self.assertListEqual(tokens , tokens_loaded )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
        tokenizer = JumanppTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
        tokenizer = JumanppTokenizer(normalize_text=False )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
        tokenizer = JumanppTokenizer(trim_whitespace=True )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
        self.assertListEqual(tokens , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
        self.assertListEqual(tokens , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
        tokenizer = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
        tokens = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
        self.assertListEqual(
            tokens , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
        text = tokenizer.encode('ありがとう。' , add_special_tokens=False )
        text_a = tokenizer.encode('どういたしまして。' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
        model_name = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
        model_name = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
        model_name = 'bert-base-cased'
        with self.assertLogs('transformers' , level='WARNING' ) as cm:
            BertJapaneseTokenizer.from_pretrained(model_name )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 284 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig( PretrainedConfig ):
    model_type = "luke"
    def __init__( self , vocab_size=5_0267 , entity_vocab_size=50_0000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
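# A minimal usage sketch (the override values below are hypothetical, not from
# the original file): the defaults above mirror `studio-ousia/luke-base`, so a
# config only needs overrides for what differs.
#
#     tiny_config = LukeConfig(num_hidden_layers=2, hidden_size=64, entity_emb_size=32)
#     print(tiny_config.model_type, tiny_config.entity_vocab_size)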
| 72 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin, accelerator, model, output_dir, model_index=0 ) -> None:
    '''simple docstring'''
    os.makedirs(output_dir, exist_ok=True )
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name )
            if accelerator.process_index == 0:
                logger.info(F"Saving model to {output_model_file}" )
                torch.save(state_dict, output_model_file )
                logger.info(F"Model saved to {output_model_file}" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name )
            logger.info(F"Saving model to {output_model_file}" )
            torch.save(state_dict, output_model_file )
            logger.info(F"Model saved to {output_model_file}" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, F"{MODEL_NAME}_{model_index}" )
            os.makedirs(ckpt_dir, exist_ok=True )
            logger.info(F"Saving model to {ckpt_dir}" )
            state_dict = {'''model''': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir ), planner=DefaultSavePlanner(), )
            logger.info(F"Model saved to {ckpt_dir}" )
def load_fsdp_model( fsdp_plugin, accelerator, model, input_dir, model_index=0 ) -> None:
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
                        '''initializing FSDP object''' )
                return
            weights_name = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name )
            logger.info(F"Loading model from {input_model_file}" )
            state_dict = torch.load(input_model_file )
            logger.info(F"Model loaded from {input_model_file}" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name )
            logger.info(F"Loading model from {input_model_file}" )
            state_dict = torch.load(input_model_file )
            logger.info(F"Model loaded from {input_model_file}" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, F"{MODEL_NAME}_{model_index}" )
                if F"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(F"Loading model from {ckpt_dir}" )
            state_dict = {'''model''': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir ), planner=DefaultLoadPlanner(), )
            state_dict = state_dict['''model''']
            logger.info(F"Model loaded from {ckpt_dir}" )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer( fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0 ) -> None:
    '''simple docstring'''
    os.makedirs(output_dir, exist_ok=True )
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model, optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name )
                logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
                torch.save(optim_state, output_optimizer_file )
                logger.info(F"Optimizer state saved in {output_optimizer_file}" )
        else:
            ckpt_dir = os.path.join(output_dir, F"{OPTIMIZER_NAME}_{optimizer_index}" )
            os.makedirs(ckpt_dir, exist_ok=True )
            logger.info(F"Saving Optimizer state to {ckpt_dir}" )
            dist_cp.save_state_dict(
                state_dict={'''optimizer''': optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir ), planner=DefaultSavePlanner(), )
            logger.info(F"Optimizer state saved in {ckpt_dir}" )
def load_fsdp_optimizer( fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0 ) -> None:
    '''simple docstring'''
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name )
            logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
            optim_state = torch.load(input_optimizer_file )
            logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
        else:
            ckpt_dir = (
                os.path.join(input_dir, F"{OPTIMIZER_NAME}_{optimizer_index}" )
                if F"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(F"Loading Optimizer from {ckpt_dir}" )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(ckpt_dir ), )
            optim_state = optim_state['''optimizer''']
            logger.info(F"Optimizer loaded from {ckpt_dir}" )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer )
        optimizer.load_state_dict(flattened_osd )
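# A minimal usage sketch (assumptions, not from the original file: a distributed
# process group was started via `accelerate launch`, and `model`/`optimizer`
# were wrapped through `accelerator.prepare` with an FSDP plugin):
#
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#     # ...later, to resume training:
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")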
| 72 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , 'models' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 315 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string, replaying the adaptive Lempel-Ziv lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # Code length grows by one bit: re-key the lexicon with a leading "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the given file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that the matching compressor prepends to the stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, decode it and write the decompressed bytes out."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
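# Usage sketch (hypothetical file names): decode a file produced by the matching
# Lempel-Ziv compressor back into its original bytes:
#
#     python lempel_ziv_decompress.py compressed.lz restored.bin
#
# `read_file_binary` turns the bytes into a bit string, `remove_prefix` strips
# the size header, and `decompress_data` replays the adaptive lexicon.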
| 292 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __UpperCAmelCase ( ABC ):
@staticmethod
@abstractmethod
def lowerCamelCase ( lowerCAmelCase_ ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def lowerCamelCase ( self ):
"""simple docstring"""
raise NotImplementedError()
| 160 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
        tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase ( self ):
"""simple docstring"""
return CanineTokenizer.from_pretrained('google/canine-s' )
    def lowerCamelCase ( self , **kwargs ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer._unicode_vocab_size = 10_24
        return tokenizer
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.canine_tokenizer
        src_text = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        expected_src_tokens = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.canine_tokenizer
        src_text = ['Once there was a man.', 'He wrote a test in HuggingFace Transformers.']
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertIn('token_type_ids' , batch )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.canine_tokenizer
        tgt_text = [
            'What\'s the weather?',
            'It\'s about 25 degrees.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
        tokenizers = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_snake_case = chr(0XE_0_0_7 )
additional_special_tokens.append(lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case , _snake_case = self.get_clean_sequence(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_5
_snake_case = chr(lowerCAmelCase_ )
tokenizer.add_special_tokens({'cls_token': special_token} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
_snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id )
_snake_case = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                SPECIAL_TOKEN_1 = chr(0XE_0_0_5 )
                SPECIAL_TOKEN_2 = chr(0XE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1 )
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2 )
                self.assertEqual(len(token_1 ) , 1 )
                self.assertEqual(len(token_2 ) , 1 )
                self.assertEqual(token_1[0] , SPECIAL_TOKEN_1 )
                self.assertEqual(token_2[0] , SPECIAL_TOKEN_2 )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase_ )
tokenizer.from_pretrained(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = [new_token_a]
_snake_case = [new_token_a]
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_snake_case = 0XE_0_0_7
_snake_case = chr(lowerCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )]
_snake_case = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = 'hello world'
if self.space_between_special_tokens:
_snake_case = '[CLS] hello world [SEP]'
else:
_snake_case = input
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase_ , [output, output.lower()] )
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_snake_case = 'a'
_snake_case = ord(lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [] )
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
| 160 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
return text_encoding
# add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
else:
            text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase_ )
return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
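# A minimal usage sketch (the `image` variable is hypothetical; the checkpoint
# name is a real Hub id, and `transformers` exposes this class as `BlipProcessor`):
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     caption = processor.batch_decode(model.generate(**inputs), skip_special_tokens=True)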
| 12 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def lowerCAmelCase ( self : int , __a : List[Any]=0 ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(__a ) )
__lowercase : Any = np.random.RandomState(__a )
__lowercase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 233 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
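# Worked example (sketch): with the default scale_factor=8, height = width = 768
# gives 768 // 64 == 12 with no remainder, so the helper returns (12 * 8, 12 * 8)
# == (96, 96), i.e. the latent resolution the UNet actually runs at.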
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 45 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
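# Note (sketch): with this lazy pattern, importing the package module stays cheap;
# the heavy submodules are only imported when an attribute such as `M2M100Model`
# is first accessed through the `_LazyModule` proxy.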
| 45 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
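# Quick self-check (sketch, not part of the original script):
#
#     >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#     array([[ 6.,  8.],
#            [14., 16.]])
#     >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#     array([[ 3.,  5.],
#            [11., 13.]])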
| 132 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list() -> tuple:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
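# Minimal usage sketch (field values here are illustrative, not upstream defaults):
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
#     assert isinstance(config.esmfold_config.trunk, TrunkConfig)  # dicts are promoted to dataclasses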
| 348 | 0 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
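# Expected behaviour (sketch):
#
#     >>> binary_search([0, 5, 7, 10, 15], 6) is None   # absent item
#     True
#     >>> binary_search([0, 5, 7, 10, 15], 15)
#     4
#     >>> bisect_left([0, 5, 5, 5, 7], 5)   # first valid insertion point
#     1
#     >>> bisect_right([0, 5, 5, 5, 7], 5)  # just past the last equal item
#     4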
if __name__ == "__main__":
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 365 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
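# Usage sketch (assumes the upstream `transformers` ONNX export API):
#
#     config = DebertaV2Config()
#     onnx_config = DebertaV2OnnxConfig(config)
#     list(onnx_config.inputs.keys())  # -> ["input_ids", "attention_mask"], since type_vocab_size == 0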
| 111 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule='squaredcos_cap_v2',
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 78 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model."""
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 252 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 254 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Run a depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
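# Worked example (sketch), using the module-level test graphs above:
#
#     strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
#     strongly_connected_components(test_graph_2)  # -> [[0, 2, 1], [3, 5, 4]]
#
# The vertex order inside each component depends on the DFS traversal order.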
| 254 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 293 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading',
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
lowerCAmelCase__ :int = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 293 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_00_00_00, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= 10)
if __name__ == "__main__":
print(F"""{solution() = }""")
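# Worked example (sketch): a 3x3 outer square with a 1x1 hole uses 9 - 1 = 8 tiles,
# so count[8] is incremented once; solution() then counts how many tile totals
# t <= t_limit can be formed by between 1 and 10 distinct laminae.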
| 260 |
from __future__ import annotations

END = '#'


class Trie:
    '''simple docstring'''

    def __init__( self ):
        '''simple docstring'''
        self._trie: dict = {}

    def insert_word( self , text: str ):
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word( self , prefix: str ):
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d: dict ):
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )


trie = Trie()
words = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str )-> tuple:
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )


def main()-> None:
    print(autocomplete_using_trie('de' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
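
# Doctest-style check of the demo above, derived by tracing the code (dict
# insertion order is preserved, and _elements appends a sentinel space for
# the END marker):
# >>> autocomplete_using_trie('de')
# ('depart ', 'detergent ', 'deer ', 'deal ')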
| 260 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8 ) ->str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )


def alternative_password_generator(chars_incl: str , i: int ) ->str:
    '''simple docstring'''
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )


# random is a generalised function for letters, characters and numbers
def random(chars: str , number: int ) ->str:
    '''simple docstring'''
    return "".join(secrets.choice(chars ) for _ in range(number ) )


def random_number(chars_incl , i ):
    '''simple docstring'''
    pass  # Put your code here...


def random_letters(chars_incl , i ):
    '''simple docstring'''
    pass  # Put your code here...


def random_characters(chars_incl , i ):
    '''simple docstring'''
    pass  # Put your code here...


def is_strong_password(password: str , min_length: int = 8 ) ->bool:
    '''simple docstring'''
    if len(password ) < min_length:
        # Your password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters


def main():
    '''simple docstring'''
    length = int(input("Please indicate the max length of your password: " ).strip() )
    chars_incl = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(length ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(chars_incl , length ) , )
    print("[If you are thinking of using this password, you had better save it.]" )


if __name__ == "__main__":
    main()
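
# Doctest-style sketch of the strength predicate above (inputs chosen for
# illustration, not taken from the source):
# >>> is_strong_password('Aa1!aaaa')
# True
# >>> is_strong_password('weakpass')  # no uppercase, digit or punctuation
# False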
| 105 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/accelerate""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
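
# This script is intended to run on a schedule (e.g. a daily GitHub Actions
# cron job -- an assumption here) with GITHUB_TOKEN exposed in the
# environment, as read at the top of main().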
| 171 | 0 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = '▁'
UpperCAmelCase__ = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
UpperCAmelCase__ = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
UpperCAmelCase__ = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
UpperCAmelCase__ = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
UpperCAmelCase__ = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class lowerCAmelCase__ ( A_ ):
__a = ["input_ids"]
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_INIT_CONFIGURATION
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = RESOURCE_FILES_NAMES
def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Any=None , _lowerCamelCase : Any=False , _lowerCamelCase : List[Any]="utf8" , _lowerCamelCase : List[Any]="[UNK]" , _lowerCamelCase : List[str]="[SEP]" , _lowerCamelCase : List[str]="[PAD]" , _lowerCamelCase : List[str]="[CLS]" , _lowerCamelCase : Any="[MASK]" , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : str , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , vocab_file=_lowerCamelCase , encoding=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
_snake_case = do_lower_case
_snake_case = sentencepiece_model_ckpt
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
_snake_case = self.load_vocab(filepath=_lowerCamelCase )
else:
_snake_case = {self.sp_model.id_to_piece(_lowerCamelCase ): id for id in range(self.sp_model.get_piece_size() )}
_snake_case = {v: k for k, v in self.vocab.items()}
def lowercase ( self : List[Any] , _lowerCamelCase : Optional[Any] ):
if text is None:
return None
_snake_case = self.tokenize(_lowerCamelCase )
_snake_case , _snake_case = '''''', []
for i, ch in enumerate(_lowerCamelCase ):
if ch in self.SP_CHAR_MAPPING:
_snake_case = self.SP_CHAR_MAPPING.get(_lowerCamelCase )
else:
_snake_case = unicodedata.normalize('''NFKC''' , _lowerCamelCase )
if self.is_whitespace(_lowerCamelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCamelCase ) )
_snake_case , _snake_case , _snake_case = normalized_text, [], 0
if self.do_lower_case:
_snake_case = text.lower()
for token in split_tokens:
if token[:1] == "▁":
_snake_case = token[1:]
_snake_case = text[offset:].index(_lowerCamelCase ) + offset
_snake_case = start + len(_lowerCamelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
_snake_case = end
return token_mapping
@property
def lowercase ( self : Union[str, Any] ):
return len(self.vocab )
def lowercase ( self : str ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ):
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : str , _lowerCamelCase : List[str] ):
_snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def lowercase ( self : Any , _lowerCamelCase : List[Any] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCamelCase , _lowerCamelCase ) for c in text) )
def lowercase ( self : int , _lowerCamelCase : Tuple , _lowerCamelCase : Dict=False , _lowerCamelCase : Tuple=64 , _lowerCamelCase : Tuple=0.1 ):
if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
_snake_case = True
if self.sp_model_kwargs.get('''alpha''' ) is not None:
_snake_case = self.sp_model_kwargs.get('''alpha''' )
if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
_snake_case = self.sp_model_kwargs.get('''nbest_size''' )
if not enable_sampling:
_snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase )
else:
_snake_case = self.sp_model.SampleEncodeAsPieces(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_snake_case = []
for pi, piece in enumerate(_lowerCamelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCamelCase ) and pi != 0:
new_pieces.append(_lowerCamelCase )
continue
else:
continue
_snake_case = 0
for i, chunk in enumerate(_lowerCamelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCamelCase ) or self.is_punct(_lowerCamelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCamelCase )
_snake_case = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_snake_case = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
_snake_case = i
if len(_lowerCamelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def lowercase ( self : Optional[int] , _lowerCamelCase : List[Any] ):
_snake_case = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase ( self : int , _lowerCamelCase : Tuple ):
_snake_case = self.convert_ids_to_tokens(_lowerCamelCase )
_snake_case = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def lowercase ( self : Optional[int] , _lowerCamelCase : Union[str, Any] ):
return self.vocab.get(_lowerCamelCase , self.vocab.get(self.unk_token ) )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
return self.reverse_vocab.get(_lowerCamelCase , self.unk_token )
def lowercase ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
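        # i.e. [CLS] A [SEP] [SEP] B [SEP] -- ErnieM separates a sentence
        # pair with a doubled [SEP], matching the token-type layout below.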
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowercase ( self : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowercase ( self : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Dict=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1]
def lowercase ( self : Dict , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCamelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCamelCase ) + 1) + [1] * (len(_lowerCamelCase ) + 3)
def lowercase ( self : Optional[Any] , _lowerCamelCase : Dict ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowercase ( self : List[str] , _lowerCamelCase : Optional[Any] ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowercase ( self : Optional[int] , _lowerCamelCase : Tuple ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowercase ( self : str , _lowerCamelCase : Tuple ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCamelCase ) == 1:
_snake_case = unicodedata.category(_lowerCamelCase )
if cat == "Zs":
return True
return False
def lowercase ( self : str , _lowerCamelCase : Union[str, Any] ):
_snake_case = {}
with io.open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(_lowerCamelCase ):
_snake_case = line.rstrip('''\n''' )
_snake_case = int(_lowerCamelCase )
return token_to_idx
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
_snake_case = 0
if os.path.isdir(_lowerCamelCase ):
_snake_case = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
_snake_case = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
_snake_case = token_index
writer.write(token + '''\n''' )
index += 1
_snake_case = os.path.join(_lowerCamelCase , '''sentencepiece.bpe.model''' )
with open(_lowerCamelCase , '''wb''' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (vocab_file,)
| 40 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
UpperCAmelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCAmelCase__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _UpperCAmelCase ( __lowerCamelCase : str ) -> str:
    __lowerCamelCase = re.sub('''<n>''' , '''''' , __lowerCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__lowerCamelCase ) )
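
# Minimal usage sketch (input chosen for illustration; assumes the punkt
# data downloaded above is available):
# >>> _UpperCAmelCase("Hello world. How are you?")
# 'Hello world.\nHow are you?'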
| 40 | 1 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size: int = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2 , arr_size - 1 , 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
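
# Worked example (a sketch): odd_even_transposition([3, 1, 2]) swaps (3, 1)
# on the even pass and (3, 2) on the odd pass, returning [1, 2, 3] after at
# most len(arr) alternating passes.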
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 160 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''' , FutureWarning , )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 160 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (DDPMParallelScheduler,)
def _snake_case ( self ,**a_ ) -> Dict:
_UpperCAmelCase : List[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**a_ )
return config
def _snake_case ( self ) -> str:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ )
def _snake_case ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=a_ ,beta_end=a_ )
def _snake_case ( self ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def _snake_case ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a_ )
def _snake_case ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def _snake_case ( self ) -> Any:
self.check_over_configs(thresholding=a_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a_ ,prediction_type=a_ ,sample_max_value=a_ ,)
def _snake_case ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _snake_case ( self ) -> Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=a_ )
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
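        # The reference values above follow the DDPM "fixed_small" posterior
        # variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # which is 0 at t == 0 and approaches beta_t at late timesteps.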
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
_UpperCAmelCase : List[str] = self.dummy_sample_deter + 0.1
_UpperCAmelCase : Optional[Any] = self.dummy_sample_deter - 0.1
_UpperCAmelCase : Union[str, Any] = samplea.shape[0]
_UpperCAmelCase : Dict = torch.stack([samplea, samplea, samplea] ,dim=0 )
_UpperCAmelCase : List[str] = torch.arange(a_ )[0:3, None].repeat(1 ,a_ )
_UpperCAmelCase : Any = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
_UpperCAmelCase : List[Any] = scheduler.batch_step_no_noise(a_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
_UpperCAmelCase : str = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Any:
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**a_ )
_UpperCAmelCase : int = len(a_ )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : Dict = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Optional[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : int = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> str:
_UpperCAmelCase : Tuple = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = len(a_ )
_UpperCAmelCase : Tuple = self.dummy_model()
_UpperCAmelCase : Optional[int] = self.dummy_sample_deter
_UpperCAmelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(a_ ) ):
# 1. predict noise residual
_UpperCAmelCase : List[Any] = model(a_ ,a_ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : List[Any] = scheduler.step(a_ ,a_ ,a_ ,generator=a_ ).prev_sample
_UpperCAmelCase : Any = pred_prev_sample
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(a_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
_UpperCAmelCase : str = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a_ )
_UpperCAmelCase : Dict = scheduler.timesteps
for i, timestep in enumerate(a_ ):
if i == len(a_ ) - 1:
_UpperCAmelCase : List[Any] = -1
else:
_UpperCAmelCase : int = timesteps[i + 1]
_UpperCAmelCase : Dict = scheduler.previous_timestep(a_ )
_UpperCAmelCase : str = prev_t.item()
self.assertEqual(a_ ,a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**a_ )
_UpperCAmelCase : Optional[Any] = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=a_ )
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [100, 87, 50, 1, 0]
_UpperCAmelCase : Tuple = len(a_ )
        with self.assertRaises(ValueError ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=a_ ,timesteps=a_ )
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : str = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config()
_UpperCAmelCase : Optional[int] = scheduler_class(**a_ )
_UpperCAmelCase : List[str] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError ,msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" ,):
scheduler.set_timesteps(timesteps=a_ )
| 349 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A_ : Dict = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), F'''{len(lowerCAmelCase_ )} != {len(lowerCAmelCase_ )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A_ : Union[str, Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A_ : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
try:
_UpperCAmelCase : Any = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(lowerCAmelCase_ ) )
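

# Worked example (a sketch, reading LAYERS_TO_COPY above): distilling a
# 12-layer teacher into a 3-layer student copies teacher layers [0, 6, 11],
# keeping the first, a middle and the last layer.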
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(lowerCAmelCase_ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers ( lowerCAmelCase_ , lowerCAmelCase_ = "student" , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , )-> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
AutoTokenizer.from_pretrained(lowerCAmelCase_ ).save_pretrained(lowerCAmelCase_ ) # purely for convenience
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ).eval()
else:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F'''teacher must be a model or string got type {type(lowerCAmelCase_ )}'''
_UpperCAmelCase : str = teacher.config.to_diff_dict()
try:
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Tuple = teacher_e
if d is None:
_UpperCAmelCase : Dict = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase ,_UpperCAmelCase : int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[str] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase_ )
# Copy weights
_UpperCAmelCase : Any = teacher.config_class(**lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase_ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase_ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase_ ) ), list(range(lowerCAmelCase_ ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(lowerCAmelCase_ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase_ , lowerCAmelCase_ )
try:
if hasattr(
lowerCAmelCase_ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase_ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase_ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase_ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase_ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase_ )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
_UpperCAmelCase : Dict = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase_ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
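# CLI usage sketch via python-fire (model name, output dir and layer counts
# below are illustrative):
#   python make_student.py facebook/bart-large-cnn student_dir --e 6 --d 3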
| 349 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a__( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase="None" , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = relative_attention
lowerCAmelCase = position_biased_input
lowerCAmelCase = pos_att_type
lowerCAmelCase = scope
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ ( self):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def a_ ( self):
"""simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size()) , [])
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = DebertaModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase)[0]
lowerCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)[0]
lowerCAmelCase = model(__lowerCAmelCase)[0]
self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = DebertaForMaskedLM(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = DebertaForSequenceClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
self.check_loss_output(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = DebertaForTokenClassification(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = DebertaForQuestionAnswering(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a__( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : Tuple = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Optional[Any] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DebertaModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = DebertaModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""")
def a_ ( self):
"""simple docstring"""
pass
@slow
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = DebertaModel.from_pretrained("""microsoft/deberta-base""")
lowerCAmelCase = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
lowerCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase)[0]
# compare the actual values for a slice.
lowerCAmelCase = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1E-4) , f"{output[:, 1:4, 1:4]}")
| 272 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """layoutlmv3"""
def __init__( self , __magic_name__=5_02_65 , __magic_name__=7_68 , __magic_name__=12 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=1e-5 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=10_24 , __magic_name__=1_28 , __magic_name__=1_28 , __magic_name__=True , __magic_name__=32 , __magic_name__=1_28 , __magic_name__=64 , __magic_name__=2_56 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=2_24 , __magic_name__=3 , __magic_name__=16 , __magic_name__=None , **__magic_name__ , ) -> Dict:
super().__init__(
vocab_size=__magic_name__ , hidden_size=__magic_name__ , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , intermediate_size=__magic_name__ , hidden_act=__magic_name__ , hidden_dropout_prob=__magic_name__ , attention_probs_dropout_prob=__magic_name__ , max_position_embeddings=__magic_name__ , type_vocab_size=__magic_name__ , initializer_range=__magic_name__ , layer_norm_eps=__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , )
_a = max_ad_position_embeddings
_a = coordinate_size
_a = shape_size
_a = has_relative_attention_bias
_a = rel_pos_bins
_a = max_rel_pos
_a = has_spatial_attention_bias
_a = rel_ad_pos_bins
_a = max_rel_ad_pos
_a = text_embed
_a = visual_embed
_a = input_size
_a = num_channels
_a = patch_size
_a = classifier_dropout
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = version.parse("""1.12""" )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def __UpperCAmelCase ( self ) -> int:
return 12
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 3 , __magic_name__ = 40 , __magic_name__ = 40 , ) -> Mapping[str, Any]:
        setattr(processor.image_processor , 'apply_ocr' , False )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = processor.tokenizer.num_special_tokens_to_add(__magic_name__ )
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
_a = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
_a = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_a = self._generate_dummy_images(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = dict(
processor(
__magic_name__ , text=__magic_name__ , boxes=__magic_name__ , return_tensors=__magic_name__ , ) )
return inputs
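
# Hedged usage sketch (names are illustrative): instantiate the ONNX config
# above with a LayoutLMv3 config, then build dummy inputs from a processor.
# processor = LayoutLMv3Processor.from_pretrained('microsoft/layoutlmv3-base')
# onnx_config = <OnnxConfig subclass above>(model_config, task='question-answering')
# dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)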
| 168 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_SCREAMING_SNAKE_CASE : Optional[Any] = False
@skip_mps
class _snake_case ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Dict = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : int = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
lowerCAmelCase_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCAmelCase__ ( cls ) -> Union[str, Any]:
'''simple docstring'''
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def lowerCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
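        # token_indices [2, 5] pick out "cat" and "frog" in the prompt once
        # the BOS token occupies position 0; these are the tokens whose
        # cross-attention maps Attend-and-Excite amplifies while denoising.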
return inputs
    def test_attend_and_excite( self ):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls ) -> Any:
'''simple docstring'''
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def lowerCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase__ )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("cuda" )
        prompt = '''a painting of an elephant with glasses'''
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="numpy" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 357 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
_SCREAMING_SNAKE_CASE : List[Any] = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
_SCREAMING_SNAKE_CASE : Any = "▁"
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Tuple = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ) -> None:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case_ = len(self.sp_model ) - 1
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
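        # i.e. <s> A </s> </s> B </s> -- the RoBERTa-style pair layout with
        # a doubled separator between the two segments.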
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , a__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def lowerCAmelCase__ ( self , a__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a__ )
return spm_id if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(a__ )
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = []
snake_case_ = ""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(a__ )
snake_case_ = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __getstate__( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 92 | 0 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    '''simple docstring'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''simple docstring'''

    def __init__( self , size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )

    @property
    def size( self ) -> int:
        '''simple docstring'''
        return self._size

    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )

    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
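# --- Added demo (not from the original file) -------------------------------
# A minimal runnable check of the 0-1 BFS above: because the deque stays
# sorted by distance, each vertex is settled in O(V + E) overall.
def _demo_zero_one_bfs() -> None:
    graph = AdjacencyList(4)
    graph.add_edge(0, 1, 0)  # free edge
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 2, 0)
    assert graph.get_shortest_path(0, 2) == 1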
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper that drives several `ControlNetModel`s with a single forward call
    and merges their residuals before they are fed to the UNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 282 | 1 |
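# --- Added usage sketch (not from the original file) -----------------------
# Combining two ControlNets; repository ids are placeholders, and real use
# happens through a ControlNet-aware pipeline rather than a bare forward call:
#
#   pose_net = ControlNetModel.from_pretrained("<pose-controlnet>")
#   canny_net = ControlNetModel.from_pretrained("<canny-controlnet>")
#   multi = MultiControlNetModel([pose_net, canny_net])
#   # Per-net down/mid residuals are summed, each scaled by its entry in
#   # `conditioning_scale`, before reaching the UNet.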
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
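# --- Added note (not from the original file) -------------------------------
# What the lazy structure buys: importing the package is cheap, and the heavy
# torch-backed modeling module is only imported on first attribute access, e.g.
#
#   from transformers.models import luke   # fast, no modeling import yet
#   luke.LukeModel                          # first access triggers the real import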
| 354 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 50 | 0 |
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the s3pl checkpoint weights to the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 227 |
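# --- Added usage sketch (not from the original file) -----------------------
# Example invocation; the script name, all paths, and the base model name are
# illustrative only:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model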
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 227 | 1 |
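# --- Added note (not from the original file) -------------------------------
# The BLEU checks are gated behind @slow and download real facebook/wmt19-*
# checkpoints; one assumed way to run a single pair locally:
#
#   RUN_SLOW=1 pytest -k "bleu_scores and en_ru" <this test file>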
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        # two linear probes on top of the CLIP image embedding
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 240 |
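# --- Added usage sketch (not from the original file) -----------------------
# The checker consumes CLIP-preprocessed pixel values plus the numpy images to
# censor; the checkpoint id below is a placeholder:
#
#   checker = IFSafetyChecker.from_pretrained("<if-safety-checker>")
#   images, nsfw_flags, watermark_flags = checker(clip_pixel_values, images_np)
#   # flagged entries of `images` come back as all-black arrays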
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        # pixel (column, row) coordinates for every pixel, shape (width * height, 2)
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # map pixel coordinates to [-1, 1], then onto the image plane
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            # `shape` is a required field; it was missing from the original call
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 240 | 1 |
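# --- Added demo (not from the original file) -------------------------------
# Runnable shape check for the pan-camera helper above: 20 poses circling the
# origin, each contributing a size x size grid of (origin, direction) rays.
def _demo_pan_cameras() -> None:
    camera = create_pan_cameras(64)
    rays = camera.camera_rays
    assert rays.shape == (1, 20 * 64 * 64, 2, 3)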
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 5 |
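# --- Added usage sketch (not from the original file) -----------------------
# Constructing the config and reading the aligned backbone outputs; runnable
# only inside the transformers package, hence shown as a sketch:
#
#   config = FocalNetConfig(out_features=["stage2", "stage4"])
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   config.out_features  # ['stage2', 'stage4']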
def get_1s_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
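# --- Added demo (not from the original file) -------------------------------
# Each `number &= number - 1` clears exactly one set bit, so the loop body
# runs popcount(number) times rather than once per bit position.
assert get_1s_count(0) == 0
assert get_1s_count(0b1011) == 3
assert get_1s_count(255) == 8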
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Optional[Any] = '▁'
A : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
A : Optional[Any] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
A : Optional[Any] = {
'xlm-roberta-base': 5_1_2,
'xlm-roberta-large': 5_1_2,
'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2,
'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2,
'xlm-roberta-large-finetuned-conll03-english': 5_1_2,
'xlm-roberta-large-finetuned-conll03-german': 5_1_2,
}
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case = None , **_snake_case , ) -> None:
'''simple docstring'''
__a = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__a = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a = 1
__a = len(self.sp_model ) + self.fairseq_offset
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> int:
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
__a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[int]:
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = ''''''.join(_snake_case ).replace(_snake_case , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,) | 366 |
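# --- Added usage sketch (not from the original file) -----------------------
# Round-trip with a real checkpoint (network access assumed):
#
#   tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tokenizer("Hello world!")["input_ids"]     # starts with 0 (<s>), ends with 2 (</s>)
#   tokenizer.decode(ids, skip_special_tokens=True)  # "Hello world!"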
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """Replicates BertConfig with additional hyper-parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        pruning_method="topK", mask_init="constant", mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 33 | 0 |
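# --- Added demo (not from the original file) -------------------------------
# The config is importable standalone, so a quick check is runnable: defaults
# mirror bert-base-uncased plus the three pruning fields.
def _demo_masked_bert_config() -> None:
    cfg = MaskedBertConfig()
    assert cfg.hidden_size == 768
    assert (cfg.pruning_method, cfg.mask_init, cfg.mask_scale) == ("topK", "constant", 0.0)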
"""simple docstring"""
import os
def lowercase_ ( ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = os.path.dirname(os.path.realpath(__UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = os.path.join(__UpperCAmelCase , """triangle.txt""" )
with open(__UpperCAmelCase ) as f:
lowerCAmelCase__ : List[str] = f.readlines()
lowerCAmelCase__ : Optional[Any] = []
for line in triangle:
lowerCAmelCase__ : Dict = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(__UpperCAmelCase ) )
a.append(__UpperCAmelCase )
for i in range(1 , len(__UpperCAmelCase ) ):
for j in range(len(a[i] ) ):
lowerCAmelCase__ : Dict = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowerCAmelCase__ : Tuple = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCAmelCase , __UpperCAmelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 242 |
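# --- Added demo (not from the original file) -------------------------------
# The same bottom-up DP on a tiny in-memory triangle, to make the recurrence
# concrete: every cell adds the larger of its two parents, so the best path
# total ends up as max(last row).
def _demo_triangle_dp() -> None:
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    assert max(a[-1]) == 23  # path 3 -> 7 -> 4 -> 9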
"""simple docstring"""
from string import ascii_uppercase
_A = {str(ord(c) - 5_5): c for c in ascii_uppercase}
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""int() can't convert non-string with explicit base""" )
if num < 0:
raise ValueError("""parameter must be positive int""" )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""'str' object cannot be interpreted as an integer""" )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 36:
raise ValueError("""base must be <= 36""" )
lowerCAmelCase__ : int = """"""
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Tuple = 0
while div != 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = divmod(__UpperCAmelCase , __UpperCAmelCase )
if base >= 11 and 9 < mod < 36:
lowerCAmelCase__ : Dict = ALPHABET_VALUES[str(__UpperCAmelCase )]
else:
lowerCAmelCase__ : Union[str, Any] = str(__UpperCAmelCase )
new_value += actual_value
lowerCAmelCase__ : Optional[Any] = num // base
lowerCAmelCase__ : Union[str, Any] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(__UpperCAmelCase )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 242 | 1 |
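# --- Added demo (not from the original file) -------------------------------
# Spot checks for the converter; digits >= 10 come from ALPHABET_VALUES.
assert decimal_to_any(5, 4) == "11"
assert decimal_to_any(20, 3) == "202"
assert decimal_to_any(58, 16) == "3A"
assert decimal_to_any(243, 17) == "E5"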
"""simple docstring"""
from timeit import timeit
__SCREAMING_SNAKE_CASE ={
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Any = 0
lowercase_ : str = len(lowerCamelCase__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = len(lowerCamelCase__ ) // 2
lowercase_ : str = len(lowerCamelCase__ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(lowerCamelCase__ ) )
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if len(lowerCamelCase__ ) <= 2:
return True
if s[0] == s[len(lowerCamelCase__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
return s == s[::-1]
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Optional[Any] = F'''all({name}(key) is value for key, value in test_data.items())'''
lowercase_ : int = F'''from __main__ import test_data, {name}'''
lowercase_ : List[str] = 50_00_00
lowercase_ : List[str] = timeit(stmt=lowerCamelCase__ , setup=lowerCamelCase__ , number=lowerCamelCase__ )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 350 |
"""
Convolutional neural network trained with plain backpropagation (NumPy only):
one convolution layer, one average/max pooling layer and a two-layer MLP.
"""
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1:   pooling size
        :param bp_num1:   units number of flatten layer
        :param bp_num2:   units number of hidden layer
        :param bp_num3:   units number of output layer
        :param rate_w:    rate of weight learning
        :param rate_t:    rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters as a pickled dict
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # restore a model saved with save_model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, plist):
        return 1 / (1 + np.exp(-1 * plist))

    def do_round(self, plist):
        return round(plist, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            # fixed to call _expand_mat (the helper defined below)
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand a list of matrices into a flat one-dimensional array
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix into a one-row array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # upsample the pooled gradient back onto each feature map
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the feature maps and pooled maps for one input image
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
| 321 | 0 |
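# --- Added demo (not from the original file) -------------------------------
# A smoke-test sketch for the CNN above, with sizes chosen so shapes line up:
# 6x6 input, 3x3 kernel, step 1 -> 4x4 feature maps; 2x2 average pooling ->
# 2x2; two kernels flatten to 2 * 2 * 2 = 8 MLP inputs. Random data, purely to
# exercise the code path.
def _demo_cnn() -> None:
    import numpy as np

    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=4, bp_num3=2)
    datas_train = [np.random.rand(6, 6) for _ in range(3)]
    datas_teach = [np.random.rand(2) for _ in range(3)]
    cnn.train(3, datas_train, datas_teach, n_repeat=2, error_accuracy=0.0, draw_e=False)
    print(cnn.predict([np.random.rand(6, 6)]))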
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCAmelCase__ = """src/diffusers"""
# Matches is_xxx_available()
UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
UpperCAmelCase__ = """
{0} = None
"""
UpperCAmelCase__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
UpperCAmelCase__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = _re_backend.findall(lowercase )
if len(lowercase ) == 0:
return None
return "_and_".join(lowercase )
def __UpperCAmelCase ( ):
"""simple docstring"""
with open(os.path.join(lowercase ,"""__init__.py""" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
_UpperCAmelCase = 0
_UpperCAmelCase = {}
# Go through the end of the file
while line_index < len(lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase ) and len(lines[line_index] ) > 1:
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_single_line_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase ) > 0:
_UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase ,lowercase )
else:
return DUMMY_CLASS.format(lowercase ,lowercase )
def __UpperCAmelCase ( lowercase=None ):
"""simple docstring"""
if backend_specific_objects is None:
_UpperCAmelCase = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
_UpperCAmelCase = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
_UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase ,lowercase ) for o in objects] )
_UpperCAmelCase = dummy_file
return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
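# Illustrative expansion (not executed): for a hypothetical torch-only object "SomePipeline",
# create_dummy_object fills the DUMMY_CLASS template into roughly the following, so the name
# stays importable without torch but raises a helpful error when actually used:
#
# class SomePipeline(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])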
| 289 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        # Boolean/None argument values below were elided in the source and are reconstructed
        # from the reference StableUnCLIP fast test; adjust if your reference differs.
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
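# Minimal usage sketch of the pipeline exercised above (illustrative only; it assumes a CUDA
# device and downloads the same checkpoint the slow tests use):
#
#   import torch
#   from diffusers import StableUnCLIPPipeline
#
#   pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   image = pipe("anime turtle", num_inference_steps=20).images[0]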
| 289 | 1 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
snake_case = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
snake_case = """FOBHMDKEXQNRAULPGSJVTYICZW"""
snake_case = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
snake_case = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
snake_case = """SGLCPQWZHKXAREONTFBVIYJUDM"""
snake_case = """HVSICLTYKQUBXDWAJZOMFGPREN"""
snake_case = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
snake_case = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
snake_case = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that rotors and positions are usable and build the plugboard dict."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
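# For illustration: the (hypothetical) plugboard string "POLAND" yields the symmetric mapping
# {"P": "O", "O": "P", "L": "A", "A": "L", "N": "D", "D": "N"} -- pairs are read two characters
# at a time and inserted in both directions.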
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher (or, symmetrically, decipher) `text` with the given machine settings."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1, rotor2, rotor3 = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3.index(symbol) - rotorpos3]
            symbol = abc[rotor2.index(symbol) - rotorpos2]
            symbol = abc[rotor1.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
snake_case = """This is my Python script that emulates the Enigma machine from WWII."""
snake_case = (1, 1, 1)
snake_case = """pictures"""
snake_case = (rotora, rotora, rotora)
snake_case = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 361 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
SCREAMING_SNAKE_CASE : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
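# Standalone sketch of the layout conversion handled above: TensorFlow stores conv kernels as
# (kH, kW, in_ch, out_ch) while torch.nn.Conv2d expects (out_ch, in_ch, kH, kW). The helper
# name is illustrative only.
def _demo_kernel_layout():
    tf_kernel = np.zeros((3, 3, 16, 32))  # TF layout: (kH, kW, in, out)
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> (out, in, kH, kW)
    assert tuple(pt_kernel.shape) == (32, 16, 3, 3)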
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into the HF EfficientNet structure."""
    # Load original model; literal argument values were elided in the source and are
    # reconstructed here (include_top=True, tensor/shape/pooling left as None).
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
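# Example invocation (the script filename is an assumption; adjust to where this file lives):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model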
| 319 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
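# Minimal standalone illustration of what these tests verify: the generic helpers accept plain
# NumPy arrays (and, with the optional backends installed, torch/tf/jax tensors) through one
# call site.
if __name__ == "__main__":
    sample = np.random.randn(3, 4)
    assert transpose(sample).shape == (4, 3)
    assert reshape(sample, (4, 3)).shape == (4, 3)
    assert expand_dims(sample, axis=1).shape == (3, 1, 4)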
| 82 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,  # reconstructed; the source elided this flag's value
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
__a = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__a = NERTransformer.add_model_specific_args(parser, os.getcwd())
__a = parser.parse_args()
__a = NERTransformer(args)
__a = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__a = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__a = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
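# Typical invocation of this script (filename, data layout and some flags are assumptions;
# the generic flags come from lightning_base.add_generic_args):
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-model \
#       --max_seq_length 128 --gpus 1 --do_train --do_predict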
| 66 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 183 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """
    Format the words of `word` so that every line is exactly `max_width` characters and fully
    (left and right) justified, with the last line left-justified.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
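# Worked example (traced by hand against the algorithm above):
#   text_justification("The quick brown fox", 10)
#   -> ["The  quick", "brown fox "]
# "The  quick" absorbs the two leftover spaces between its words; the final line is
# left-justified and right-padded to the full width of 10.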
| 183 | 1 |
"""simple docstring"""
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """For 0 <= n <= 1000000, return True if n is prime."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return the circular primes below limit."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    """Return the number of circular primes below one million."""
    return len(find_circular_primes())
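# Sanity check: the thirteen circular primes below one hundred, in ascending order.
assert find_circular_primes(100) == [2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97]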
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""") | 74 |
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start:end + 1] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
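# Worked example for the class above: the prefix sums of [1, 2, 3, 4] are [1, 3, 6, 10].
example = PrefixSum([1, 2, 3, 4])
assert example.get_sum(1, 3) == 9  # 2 + 3 + 4
assert example.contains_sum(6)  # the subarray [1, 2, 3]
assert not example.contains_sum(100)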
| 153 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # Attribute names below are reconstructed from the reference Mask2Former test; the
        # source elided them, so verify against the installed config if they differ.
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def __snake_case ( self : Any , snake_case__ : Any , snake_case__ : List[Any] ):
'''simple docstring'''
lowercase :Optional[Any] = output.encoder_hidden_states
lowercase :int = output.pixel_decoder_hidden_states
lowercase :List[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case__ ) , config.decoder_layers )
def __snake_case ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any]=False ):
'''simple docstring'''
with torch.no_grad():
lowercase :Tuple = MaskaFormerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase :Optional[Any] = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowercase :Optional[int] = model(snake_case__ , output_hidden_states=snake_case__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case__ , snake_case__ )
def __snake_case ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = MaskaFormerForUniversalSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
def comm_check_on_output(snake_case__ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase :Any = model(pixel_values=snake_case__ , pixel_mask=snake_case__ )
lowercase :Dict = model(snake_case__ )
comm_check_on_output(snake_case__ )
lowercase :List[Any] = model(
pixel_values=snake_case__ , pixel_mask=snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
comm_check_on_output(snake_case__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*snake_case__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def __snake_case ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def __snake_case ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __snake_case ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :List[str] = model_class(snake_case__ )
lowercase :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Union[str, Any] = [*signature.parameters.keys()]
lowercase :List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case__ )
@slow
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase :Dict = MaskaFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :List[Any] = (self.model_tester.min_size,) * 2
lowercase :Optional[int] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=snake_case__ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=snake_case__ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=snake_case__ ).long(),
}
lowercase :Tuple = self.model_tester.get_config()
lowercase :str = MaskaFormerForUniversalSegmentation(snake_case__ ).to(snake_case__ )
lowercase :Dict = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(snake_case__ , **snake_case__ , output_hidden_states=snake_case__ )
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :List[str] = model_class(snake_case__ ).to(snake_case__ )
lowercase :int = model(**snake_case__ , output_attentions=snake_case__ )
self.assertTrue(outputs.attentions is not None )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase :Tuple = self.all_model_classes[1]
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowercase :Any = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowercase :str = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ ).loss
loss.backward()
def __snake_case ( self : int ):
'''simple docstring'''
lowercase :Optional[Any] = self.all_model_classes[1]
lowercase :Any = self.model_tester.prepare_config_and_inputs()
lowercase :Optional[Any] = True
lowercase :Union[str, Any] = True
lowercase :Any = model_class(snake_case__ ).to(snake_case__ )
model.train()
lowercase :Tuple = model(snake_case__ , mask_labels=snake_case__ , class_labels=snake_case__ )
lowercase :int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase :Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase :str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase :Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase = 1E-4
def lowerCamelCase () -> Tuple:
lowercase :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
lowercase :int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(snake_case__ )
lowercase :Any = self.default_image_processor
lowercase :int = prepare_img()
lowercase :int = image_processor(snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
lowercase :List[str] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
lowercase :Any = model(**snake_case__ )
lowercase :Dict = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowercase :Optional[Any] = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
lowercase :Any = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(snake_case__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case__ ).eval()
lowercase :int = self.default_image_processor
lowercase :List[Any] = prepare_img()
lowercase :Optional[int] = image_processor(snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
lowercase :List[str] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(snake_case__ , (1, 3, 3_8_4, 3_8_4) )
with torch.no_grad():
lowercase :Union[str, Any] = model(**snake_case__ )
# masks_queries_logits
lowercase :List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase :Optional[Any] = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
lowercase :Union[str, Any] = torch.tensor(snake_case__ ).to(snake_case__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case__ , atol=snake_case__ ) )
# class_queries_logits
lowercase :List[str] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase :List[str] = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case__ ).eval()
lowercase :List[str] = self.default_image_processor
lowercase :Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowercase :Any = inputs['''pixel_values'''].to(snake_case__ )
lowercase :List[Any] = [el.to(snake_case__ ) for el in inputs['''mask_labels''']]
lowercase :Optional[Any] = [el.to(snake_case__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase :Dict = model(**snake_case__ )
self.assertTrue(outputs.loss is not None )
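# Hedged end-to-end sketch of the API exercised above (upstream `transformers` names; the
# identifiers in this file are obfuscated stand-ins, e.g. `MaskaFormerModel`):
#
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   results = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])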
| 356 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = "layoutlmv3"
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_0_2_4 , coordinate_size=1_2_8 , shape_size=1_2_8 , has_relative_attention_bias=True , rel_pos_bins=3_2 , max_rel_pos=1_2_8 , rel_2d_pos_bins=6_4 , max_rel_2d_pos=2_5_6 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=2_2_4 , num_channels=3 , patch_size=1_6 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = version.parse("1.12" )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __snake_case ( self : int ):
'''simple docstring'''
return 1e-5
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return 1_2
def __snake_case ( self : str , snake_case__ : "ProcessorMixin" , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 4_0 , snake_case__ : int = 4_0 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , snake_case__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase :Dict = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase :Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(snake_case__ )
lowercase :List[str] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
lowercase :Tuple = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase :List[str] = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase :List[Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase :Dict = dict(
processor(
snake_case__ , text=snake_case__ , boxes=snake_case__ , return_tensors=snake_case__ , ) )
return inputs
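# Hedged export sketch (upstream `transformers` names assumed; in this file the config and
# ONNX-config classes above are bound to obfuscated names):
#
#   from transformers import LayoutLMv3Config, LayoutLMv3Processor
#   from transformers.utils import TensorType
#   config = LayoutLMv3Config()
#   onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)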
| 172 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
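if __name__ == "__main__":
    # Minimal smoke test (an illustrative addition, not part of the original module): a 1 kHz
    # low-pass at a 48 kHz sample rate should strongly attenuate a 10 kHz tone. This assumes
    # `IIRFilter` exposes a per-sample `process` method, as in the reference implementation.
    filt = make_lowpass(1_000, 48_000)
    tone = (sin(tau * 10_000 * n / 48_000) for n in range(48_000))
    peak_out = max(abs(filt.process(sample)) for sample in tone)
    print(f"peak amplitude after low-pass: {peak_out:.3f}")  # expected well below 1.0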
| 104 |
'''simple docstring'''
import torch
from torch import nn
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1 ,lowercase__ : Optional[Any]=False ):
super().__init__()
__lowercase = n_token
__lowercase = d_embed
__lowercase = d_proj
__lowercase = cutoffs + [n_token]
__lowercase = [0] + self.cutoffs
__lowercase = div_val
__lowercase = self.cutoffs[0]
__lowercase = len(self.cutoffs ) - 1
__lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
__lowercase = nn.ModuleList()
__lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
else:
self.out_projs.append(lowercase__ )
self.out_layers.append(nn.Linear(lowercase__ ,lowercase__ ) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
self.out_layers.append(nn.Linear(lowercase__ ,r_idx - l_idx ) )
__lowercase = keep_order
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Any ):
if proj is None:
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__lowercase = nn.functional.linear(lowercase__ ,proj.t().contiguous() )
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Any=None ,lowercase__ : List[str]=False ):
if labels is not None:
# Shift so that tokens < n predict n
__lowercase = hidden[..., :-1, :].contiguous()
__lowercase = labels[..., 1:].contiguous()
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
__lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
__lowercase = labels != -1_0_0
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = (
-nn.functional.log_softmax(lowercase__ ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
if labels is None:
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = 0
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__lowercase = (labels >= l_idx) & (labels < r_idx)
__lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__lowercase = labels.index_select(0 ,lowercase__ ) - l_idx
__lowercase = head_logprob.index_select(0 ,lowercase__ )
__lowercase = hidden.index_select(0 ,lowercase__ )
else:
__lowercase = hidden
if i == 0:
if labels is not None:
__lowercase = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__lowercase = logprob_i
if labels is not None:
if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,lowercase__ ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ):
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = head_logprob[:, -i] + tail_logprob_i
__lowercase = logprob_i
return out
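# Hedged usage sketch (this module mirrors Transformer-XL's ProjectedAdaptiveLogSoftmax, for
# which the obfuscated class name above stands in; the sizes below are illustrative wt103 values):
#
#   crit = ProjectedAdaptiveLogSoftmax(n_token=267_735, d_embed=1_024, d_proj=1_024,
#                                      cutoffs=[20_000, 40_000, 200_000], div_val=4)
#   nll = crit(hidden, labels)         # training: per-token negative log-likelihoods
#   log_probs = crit.log_prob(hidden)  # inference: full log-softmax over the vocabulary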
| 104 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCAmelCase ( snake_case_ ):
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , "width_multiplier" ) )
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=64 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__="swish" , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=0.1 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=10 , UpperCamelCase__=None , UpperCamelCase__=0.25 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , ) -> Any:
'''simple docstring'''
snake_case : str = parent
snake_case : Dict = batch_size
snake_case : Union[str, Any] = image_size
snake_case : Union[str, Any] = patch_size
snake_case : Tuple = num_channels
snake_case : Union[str, Any] = make_divisible(512 * width_multiplier , divisor=8 )
snake_case : int = hidden_act
snake_case : List[Any] = conv_kernel_size
snake_case : Optional[Any] = output_stride
snake_case : str = classifier_dropout_prob
snake_case : int = use_labels
snake_case : int = is_training
snake_case : str = num_labels
snake_case : List[Any] = initializer_range
snake_case : List[Any] = scope
snake_case : List[str] = width_multiplier
snake_case : str = ffn_dropout
snake_case : Dict = attn_dropout
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Dict = None
snake_case : List[Any] = None
if self.use_labels:
snake_case : str = ids_tensor([self.batch_size] , self.num_labels )
snake_case : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : Tuple = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
snake_case : Any = self.num_labels
snake_case : List[Any] = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = self.num_labels
snake_case : Optional[Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case ,snake_case : Optional[int] = config_and_inputs
snake_case : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Dict = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = MobileViTVaModelTester(self )
snake_case : Optional[int] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case ,snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(UpperCamelCase__ )
snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[Any] = [*signature.parameters.keys()]
snake_case : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
snake_case : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : str = outputs.hidden_states
snake_case : List[Any] = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case : str = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case ,snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Tuple = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Tuple = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : List[Any] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
UpperCamelCase__ )
snake_case : Optional[int] = self.default_image_processor
snake_case : int = prepare_img()
snake_case : Tuple = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : str = model(**UpperCamelCase__ )
# verify the logits
snake_case : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
snake_case : Union[str, Any] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case : Dict = model.to(UpperCamelCase__ )
snake_case : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case : Optional[int] = prepare_img()
snake_case : str = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Union[str, Any] = model(**UpperCamelCase__ )
snake_case : str = outputs.logits
# verify the logits
snake_case : int = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
snake_case : Optional[Any] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case : int = model.to(UpperCamelCase__ )
snake_case : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case : Tuple = prepare_img()
snake_case : List[str] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Optional[int] = model(**UpperCamelCase__ )
snake_case : List[str] = outputs.logits.detach().cpu()
snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
snake_case : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
snake_case : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
snake_case : Dict = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
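# Hedged inference sketch mirroring the integration tests above (the checkpoint is the one
# already used in this file; class names follow the upstream `transformers` API):
#
#   model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
#   image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
#   inputs = image_processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]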
| 112 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__snake_case = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case : Optional[Any] = list(s_dict.keys() )
for key in keys:
snake_case : Any = R".*/layers_(\d+)"
snake_case : Tuple = key
if re.match(lowercase , lowercase ):
snake_case : List[str] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowercase )
snake_case : Union[str, Any] = R"(encoder|decoder)\/"
if re.match(lowercase , lowercase ):
snake_case : Any = re.match(lowercase , lowercase ).groups()
if groups[0] == "encoder":
snake_case : Union[str, Any] = re.sub(R"/mlp/" , R"/1/mlp/" , lowercase )
snake_case : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowercase )
elif groups[0] == "decoder":
snake_case : str = re.sub(R"/mlp/" , R"/2/mlp/" , lowercase )
snake_case : List[str] = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
snake_case : int = new_key.replace(lowercase , lowercase )
print(F'{key} -> {new_key}' )
snake_case : Optional[Any] = s_dict.pop(lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : int = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
snake_case : Tuple = s_dict[key].shape[0]
snake_case : int = s_dict[key]
for idx in range(lowercase ):
snake_case : List[str] = expert_weihts[idx]
print(F'{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}' )
s_dict.pop(lowercase )
return s_dict
__snake_case = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def __lowerCAmelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> int:
"""simple docstring"""
import regex as re
with open(lowercase , "r" ) as f:
snake_case : List[str] = f.read()
snake_case : Tuple = re.findall(R"(.*) = ([0-9.]*)" , lowercase )
snake_case : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
snake_case : Tuple = float(lowercase ) if "." in value else int(lowercase )
snake_case : List[str] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowercase )[0]
snake_case : List[Any] = str(activation[1] )
snake_case : Optional[Any] = num_experts
snake_case : List[Any] = SwitchTransformersConfig(**lowercase )
return config
def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any="./" , lowercase : int=8 ) -> Dict:
"""simple docstring"""
print(F'Loading flax weights from : {flax_checkpoint_path}' )
snake_case : Union[str, Any] = checkpoints.load_tax_checkpoint(lowercase )
if gin_file is not None:
snake_case : List[str] = convert_gin_to_config(lowercase , lowercase )
else:
snake_case : str = SwitchTransformersConfig.from_pretrained(lowercase )
snake_case : Any = SwitchTransformersForConditionalGeneration(lowercase )
snake_case : Optional[Any] = flax_params["target"]
snake_case : Optional[int] = flatten_dict(lowercase , sep="/" )
snake_case : Optional[Any] = rename_keys(lowercase )
snake_case : List[str] = unflatten_dict(lowercase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase , lowercase )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
__snake_case = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
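# Example invocation (a sketch; paths are placeholders and the script name is an assumption):
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8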
| 112 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def a_ ( __lowercase : str ) -> str:
# vision encoder
if "img_encoder.pos_embed" in name:
_snake_case = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
_snake_case = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
_snake_case = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
_snake_case = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
_snake_case = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
_snake_case = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
_snake_case = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
_snake_case = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
_snake_case = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
_snake_case = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
_snake_case = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
_snake_case = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
_snake_case = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
_snake_case = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
_snake_case = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
_snake_case = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
_snake_case = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
_snake_case = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
_snake_case = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
_snake_case = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
_snake_case = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
_snake_case = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
_snake_case = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
_snake_case = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] ) -> Tuple:
for key in orig_state_dict.copy().keys():
_snake_case = orig_state_dict.pop(__lowercase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_snake_case = key.split('.' )
_snake_case , _snake_case = int(key_split[2] ), int(key_split[4] )
_snake_case = config.vision_config.hidden_size
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[dim : dim * 2, :]
_snake_case = val[-dim:, :]
else:
_snake_case = val[:dim]
_snake_case = val[dim : dim * 2]
_snake_case = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_snake_case = key.split('.' )
_snake_case = int(key_split[3] )
_snake_case = config.text_config.hidden_size
if "weight" in key:
_snake_case = val[:dim, :]
_snake_case = val[
dim : dim * 2, :
]
_snake_case = val[-dim:, :]
else:
_snake_case = val[:dim]
_snake_case = val[dim : dim * 2]
_snake_case = val[-dim:]
else:
_snake_case = rename_key(__lowercase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_snake_case = val.squeeze_()
else:
_snake_case = val
return orig_state_dict
def a_ ( ) -> str:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def a_ ( __lowercase : List[str] , __lowercase : List[str] , __lowercase : List[str]="groupvit-gcc-yfcc" , __lowercase : Union[str, Any]=False ) -> List[str]:
_snake_case = GroupViTConfig()
_snake_case = GroupViTModel(__lowercase ).eval()
_snake_case = torch.load(__lowercase , map_location='cpu' )['model']
_snake_case = convert_state_dict(__lowercase , __lowercase )
_snake_case , _snake_case = model.load_state_dict(__lowercase , strict=__lowercase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__lowercase ) == 0)
# verify result
_snake_case = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
_snake_case = prepare_img()
_snake_case = processor(text=['a photo of a cat', 'a photo of a dog'] , images=__lowercase , padding=__lowercase , return_tensors='pt' )
with torch.no_grad():
_snake_case = model(**__lowercase )
if model_name == "groupvit-gcc-yfcc":
_snake_case = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_snake_case = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(f'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , __lowercase , atol=1E-3 )
processor.save_pretrained(__lowercase )
model.save_pretrained(__lowercase )
print('Successfully saved processor and model to' , __lowercase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(__lowercase , organization='nielsr' )
model.push_to_hub(__lowercase , organization='nielsr' )
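# Example invocation (a sketch; the checkpoint path is a placeholder):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc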
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 282 |
class PrefixSum:
    '''simple docstring'''

    def __init__( self , array : list[int] ):
        '''simple docstring'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start : int , end : int ) -> int:
        '''Return the sum of array[start..end] (inclusive) in O(1).'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum : int ) -> bool:
        '''Return True if any contiguous subarray sums to target_sum.'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
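# Quick worked examples for the class above (prefix sums of [1, 2, 3, 4] are [1, 3, 6, 10]):
#
#   PrefixSum([1, 2, 3, 4]).get_sum(1, 3)    -> 9     (2 + 3 + 4)
#   PrefixSum([1, 2, 3, 4]).contains_sum(5)  -> True  (subarray [2, 3])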
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 1 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowercase ( _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
snake_case_ : Any = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
snake_case_ : List[str] = input_paths_and_base_extractors[compression_format]
if input_path is None:
snake_case_ : str = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_a )
assert base_extractor.is_extractable(_a )
snake_case_ : Optional[Any] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(_a , _a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case_ : Union[str, Any] = file_path.read_text(encoding='''utf-8''' )
else:
snake_case_ : Union[str, Any] = output_path.read_text(encoding='''utf-8''' )
snake_case_ : Any = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __lowercase ( _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
snake_case_ : str = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
snake_case_ : List[str] = input_paths[compression_format]
if input_path is None:
snake_case_ : str = f"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_a )
snake_case_ : str = Extractor.infer_extractor_format(_a )
assert extractor_format is not None
snake_case_ : List[str] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(_a , _a , _a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
snake_case_ : Tuple = file_path.read_text(encoding='''utf-8''' )
else:
snake_case_ : str = output_path.read_text(encoding='''utf-8''' )
snake_case_ : List[str] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowercase ( _a , _a ):
"""simple docstring"""
import tarfile
snake_case_ : Union[str, Any] = tmp_path / '''data_dot_dot'''
directory.mkdir()
snake_case_ : List[Any] = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(_a , '''w''' ) as f:
f.add(_a , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def __lowercase ( _a ):
"""simple docstring"""
import tarfile
snake_case_ : Optional[Any] = tmp_path / '''data_sym_link'''
directory.mkdir()
snake_case_ : Any = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=_a )
with tarfile.TarFile(_a , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def __lowercase ( _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
snake_case_ : str = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
snake_case_ : Tuple = insecure_tar_files[insecure_tar_file]
snake_case_ : str = tmp_path / '''extracted'''
TarExtractor.extract(_a , _a )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowercase ( _a ):
"""simple docstring"""
snake_case_ : Any = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
snake_case_ : Any = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(_a )
assert zipfile.is_zipfile(str(_a ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(_a ) # but we're right
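# Hedged usage sketch of the API under test (imported above from `datasets.utils.extract`;
# the format is inferred from the file's magic number, so the archive must exist on disk):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")   # e.g. "gzip"
#   Extractor.extract("archive.tar.gz", "extracted_dir", fmt)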
| 363 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
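

# A minimal usage sketch (added for illustration; not part of the original module).
# With power = 2, the only way to write 13 as a sum of squares of unique natural
# numbers is 4 + 9, so solve(13, 2) == 1.
if __name__ == "__main__":
    print(f"{solve(13, 2) = }")  # expected: 1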
| 155 | 0 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    r"""
    Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a
    single processor.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor and the `text` argument to the tokenizer.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """
        Forwards `input_features` to the feature extractor's pad method and `labels` to the tokenizer's pad method.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 8 |
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two parent strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of the child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate offspring for the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a perfect match of the target is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
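
    # Hedged sanity check (added; kept commented out so the demo above stays the
    # single entry point): with a tiny target and a matching gene pool the search
    # converges almost instantly.
    # assert basic("abc", list("abc"), debug=False)[2] == "abc"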
| 49 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a CodeGen model.
    """

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
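

# Hedged usage sketch (added): `attribute_map` aliases the GPT-2-style names onto
# the CodeGen config fields.
# config = CodeGenConfig()
# assert config.hidden_size == config.n_embd
# assert config.num_hidden_layers == config.n_layer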
| 252 |
from __future__ import annotations
from random import random
class Node:
    """Treap's node: stores a value and the node's random priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the subtree into (left, right): left holds values smaller than
    `value`, right holds the rest."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two subtrees, keeping the heap property on priorities."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
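
    # Hedged example (added; not part of the original module): inserting 1..3 and
    # erasing 2 leaves an in-order traversal of "1,3,".
    # root = None
    # for value in (1, 2, 3):
    #     root = insert(root, value)
    # root = erase(root, 2)
    # inorder(root)  # prints: 1,3,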
| 252 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 46 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """
    Copy/paste/tweak the original model's weights into our GLPN structure.
    """

    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
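
# Hedged CLI sketch (added; the checkpoint path below is illustrative):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti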
| 46 | 1 |
"""simple docstring"""
import math
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase ( UpperCamelCase__ = 10_001 ):
"""simple docstring"""
try:
A__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
A__ = []
A__ = 2
while len(UpperCamelCase__ ) < nth:
if is_prime(UpperCamelCase__ ):
primes.append(UpperCamelCase__ )
num += 1
else:
num += 1
return primes[len(UpperCamelCase__ ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
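
# Hedged examples (added for illustration): solution(1) == 2 and solution(6) == 13;
# the default call above prints the 10001st prime.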
| 154 | """simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """
    Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")

    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")

    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
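
    # Hedged example (added): a 1 H inductor with a 1 F capacitor resonates at
    # 1 / (2 * pi) ~= 0.159 Hz.
    print(resonant_frequency(inductance=1, capacitance=1))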
| 154 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
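

# Hedged usage sketch (added; `ds` stands for an illustrative `datasets.Dataset`):
# ds = ds.with_format("jax")
# batch = ds[:4]  # column values come back as `jax.Array`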
| 197 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def test_custom_files_are_present(transformers_path) -> bool:
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 124 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neo'] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_gpt_neo'] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 251 |
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
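
# Hedged CLI sketch (added; the script name and checkpoint path are illustrative):
#   python convert_s2t_checkpoint.py --fairseq_path ./s2t_transformer_sm.pt \
#       --pytorch_dump_folder_path ./s2t-small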
| 251 | 1 |
def gnome_sort(lst: list) -> list:
    """Sort a list in place with the gnome sort algorithm and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
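
    # Hedged sanity checks (added; not part of the original script):
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([]) == []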
| 24 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 81 | 0 |
"""Fine-tuning the library models for multiple choice."""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
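
# Hedged CLI sketch (added; paths and the task name are illustrative):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./data --output_dir ./out --do_train --do_eval --max_seq_length 80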
| 287 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCamelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_UpperCamelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
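    # Illustrative call outside the test harness (assumption: the helper under
    # test comes from diffusers' pipeline utilities, imported earlier in this file):
    # is_safetensors_compatible(["unet/diffusion_pytorch_model.safetensors"])  # -> True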
| 287 | 1 |
"""simple docstring"""
from math import isclose, sqrt
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple[float, float, float]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = point_y / 4 / point_x
lowerCAmelCase_ :Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
lowerCAmelCase_ :Union[str, Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
lowerCAmelCase_ :str = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
lowerCAmelCase_ :Tuple = outgoing_gradient**2 + 4
lowerCAmelCase_ :Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
lowerCAmelCase_ :str = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
lowerCAmelCase_ :Optional[Any] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
lowerCAmelCase_ :Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
lowerCAmelCase_ :List[Any] = x_minus if isclose(lowercase__ , lowercase__ ) else x_plus
lowerCAmelCase_ :List[str] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
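# Quick sanity check (exploratory only, not part of the solution): every point
# returned by next_point must still lie on the ellipse 4x^2 + y^2 = 100, since
# next_x solves the quadratic obtained by substituting the line into it.
# x, y, m = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
# assert isclose(4 * x * x + y * y, 100)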
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Return the number of times the beam hits the inner surface of the
    ellipse before exiting through the gap at the top."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    # the exit gap is the section -0.01 <= x <= 0.01 at the top of the ellipse
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 84 |
from ...processing_utils import ProcessorMixin

class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
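# Illustrative usage (the inputs below are placeholders, not defined here):
# processor = TvltProcessor(image_processor, feature_extractor)
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44_100)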
| 118 | 0 |
"""Project Euler problem 92: square digit chains (https://projecteuler.net/problem=92)."""

# Precompute the digit-square sum for every number below 100000 so that
# next_number() can consume five digits of its argument at a time.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
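# For example, next_number(44) == 4**2 + 4**2 == 32, and the chain continues
# 32 -> 13 -> 10 -> 1, so 44 belongs to the chain that ends in 1.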
# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when declared
# first, requires the least number of iterations for all the members to be checked;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are seeded at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the square digit chain starting at ``number`` ends in 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # every power-of-ten multiple of ``number`` has the same digit-square sum,
    # so it shares the same chain ending
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 371 |
"""simple docstring"""
def _lowerCAmelCase ( ):
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = 1
UpperCAmelCase = 2
while i * i <= n:
UpperCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
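# Example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6,
# matching its divisors 1, 2, 4, 7, 14 and 28.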
def solution() -> int:
    """Return the first triangle number having more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 181 | 0 |
class Graph:
    """Directed graph stored as an adjacency list."""

    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing the already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
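# With this adjacency-list representation the traversal touches every vertex
# and every edge once, i.e. O(V + E) time.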
| 128 |
class Graph:
    """Directed graph stored as an adjacency list."""

    def __init__(self) -> None:
        self.vertex: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, ' -> ', ' -> '.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=' ')

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == '__main__':
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print('DFS:')
    g.dfs()

# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 232 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
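# With the lazy structure above, `import transformers.models.reformer` stays cheap:
# submodules such as `modeling_reformer` are only imported when one of their
# attributes (e.g. `ReformerModel`) is first accessed.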
| 365 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
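# Sketch of how these pieces fit together (the path and tokenizer are placeholders):
# processor = HansProcessor()
# examples = processor.get_train_examples("/path/to/hans")
# features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)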
| 24 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
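# e.g. `from transformers.models.wav2vec2 import Wav2Vec2Config` resolves through
# this lazy structure without importing the torch, tensorflow or flax backends up front.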
| 49 |
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline: assigns labels to an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 346 | 0 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_snake_case = None
try:
import msvcrt
except ImportError:
_snake_case = None
try:
import fcntl
except ImportError:
_snake_case = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_snake_case = OSError
# Data
# ------------------------------------------------
_snake_case = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
_snake_case = '3.0.12'
_snake_case = None
def lowerCAmelCase__ ( ):
'''simple docstring'''
global _logger
_a : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> int:
_a : List[str] = lock_file
return None
def __str__( self : Dict ) -> List[Any]:
_a : str = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class UpperCamelCase :
def __init__( self : str , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
_a : Tuple = lock
return None
def __enter__( self : List[str] ) -> Optional[Any]:
return self.lock
def __exit__( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ) -> int:
self.lock.release()
return None
class UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=-1 , UpperCAmelCase__ : Tuple=None ) -> Any:
_a : int = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
_a : Any = self.hash_filename_if_too_long(UpperCAmelCase__ , UpperCAmelCase__ )
# The path to the lock file.
_a : List[str] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a : Optional[Any] = None
# The default timeout value.
_a : List[str] = timeout
# We use this lock primarily for the lock counter.
_a : List[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a : Tuple = 0
return None
@property
def _lowercase ( self : Dict ) -> Union[str, Any]:
return self._lock_file
@property
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
return self._timeout
@timeout.setter
def _lowercase ( self : Dict , UpperCAmelCase__ : int ) -> List[str]:
_a : int = float(UpperCAmelCase__ )
return None
def _lowercase ( self : Tuple ) -> Tuple:
raise NotImplementedError()
def _lowercase ( self : List[Any] ) -> List[str]:
raise NotImplementedError()
@property
def _lowercase ( self : Tuple ) -> int:
return self._lock_file_fd is not None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=0.0_5 ) -> Optional[Any]:
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a : str = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a : Any = id(self )
_a : List[str] = self._lock_file
_a : Any = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(UpperCAmelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a : Tuple = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any=False ) -> Union[str, Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a : Any = id(self )
_a : List[Any] = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
_a : Optional[int] = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : Union[str, Any] ) -> Tuple:
self.acquire()
return self
def __exit__( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int ) -> List[str]:
self.release()
return None
def __del__( self : str ) -> Optional[Any]:
self.release(force=UpperCAmelCase__ )
return None
def _lowercase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : int ) -> str:
_a : Optional[Any] = os.path.basename(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > max_length and max_length > 0:
_a : Optional[int] = os.path.dirname(UpperCAmelCase__ )
_a : Optional[int] = str(hash(UpperCAmelCase__ ) )
_a : Union[str, Any] = filename[: max_length - len(UpperCAmelCase__ ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
else:
return path
class UpperCamelCase ( snake_case_ ):
def __init__( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int]=-1 , UpperCAmelCase__ : Dict=None ) -> str:
from .file_utils import relative_to_absolute_path
super().__init__(UpperCAmelCase__ , timeout=UpperCAmelCase__ , max_filename_length=UpperCAmelCase__ )
_a : List[str] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def _lowercase ( self : Optional[Any] ) -> str:
_a : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a : int = os.open(self._lock_file , UpperCAmelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCAmelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(UpperCAmelCase__ )
else:
_a : str = fd
return None
def _lowercase ( self : Union[str, Any] ) -> str:
_a : Union[str, Any] = self._lock_file_fd
_a : Union[str, Any] = None
msvcrt.locking(UpperCAmelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(UpperCAmelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=-1 , UpperCAmelCase__ : List[str]=None ) -> Union[str, Any]:
_a : List[str] = os.statvfs(os.path.dirname(UpperCAmelCase__ ) ).f_namemax
super().__init__(UpperCAmelCase__ , timeout=UpperCAmelCase__ , max_filename_length=UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> str:
_a : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a : Tuple = os.open(self._lock_file , UpperCAmelCase__ )
try:
fcntl.flock(UpperCAmelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCAmelCase__ )
else:
_a : str = fd
return None
def _lowercase ( self : List[str] ) -> str:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a : int = self._lock_file_fd
_a : Dict = None
fcntl.flock(UpperCAmelCase__ , fcntl.LOCK_UN )
os.close(UpperCAmelCase__ )
return None
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a : Any = os.open(self._lock_file , UpperCAmelCase__ )
except OSError:
pass
else:
_a : Dict = fd
return None
def _lowercase ( self : List[str] ) -> int:
os.close(self._lock_file_fd )
_a : Any = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_snake_case = None
if msvcrt:
_snake_case = WindowsFileLock
elif fcntl:
_snake_case = UnixFileLock
else:
_snake_case = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
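# Minimal usage sketch (the lock path is a placeholder):
# lock = FileLock("/tmp/my_resource.lock", timeout=10)
# with lock:
#     ...  # critical section; the lock is released when the block exits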
| 351 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Random seed for initialization.'''} , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a : Union[str, Any] = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a : Any = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
_a : str = dataset.sort("""probability""" , reverse=UpperCamelCase__ )
_a : Any = dataset.select(range(UpperCamelCase__ ) )
_a : Tuple = dataset.remove_columns(["""label""", """probability"""] )
_a : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
_a : Dict = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
_a : Union[str, Any] = dataset.shuffle(seed=args.seed )
_a : Optional[int] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a : Dict = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a : Union[str, Any] = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a : Any = STTrainingArguments(output_dir=UpperCamelCase__ )
_a : Any = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a : Union[str, Any] = {}
_a : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a : int = args.train_file
_a : List[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a : Union[str, Any] = args.eval_file
for key in data_files:
_a : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_a : str = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_a : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
_a : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : str = None
_a : int = None
_a : str = 0
_a : List[Any] = False
# Show the progress bar
_a : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a : Any = eval_result
if best_iteration is None:
_a : Union[str, Any] = new_iteration
_a : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a : Union[str, Any] = new_iteration
_a : List[str] = new_eval_result
_a : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
_a : Tuple = new_iteration
_a : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
| 324 | 0 |
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
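# Note: the integration tests above download checkpoints from the Hub and are
# gated behind the @slow decorator, so they only run when RUN_SLOW=1 is set.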
| 283 |
"""simple docstring"""
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
from torch.utils.cpp_extension import load
A__ = Path(lowercase_ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
A__ = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , lowercase_ , with_cuda=lowercase_ , extra_include_paths=[str(lowercase_ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
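
# Usage sketch (assumes a CUDA toolchain and the kernel sources listed above exist
# on disk; the function name below comes from the kernel's pybind bindings and its
# exact signature should be checked there):
#     MSDA = load_cuda_kernels()
#     output = MSDA.ms_deform_attn_forward(...)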
"""Tokenization classes for Salesforce CTRL."""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
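
# A control code is prepended to a prompt to steer CTRL's generation style. For
# example, "Links Hello world" conditions generation on the "Links" code, which maps
# to token id 63674 in the table above.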

def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
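
# Worked example (illustrative): for word = ("h", "e", "l", "lo</w>"),
# get_pairs(word) == {("h", "e"), ("e", "l"), ("l", "lo</w>")}.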

class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL tokenizer, based on byte-pair encoding (BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
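
    # Illustrative trace (assuming the "l l" merge has the best rank for this token):
    # bpe("hello") starts from ("h", "e", "l", "l", "o</w>"), first merges to
    # ("h", "e", "ll", "o</w>"), and after the remaining merges returns a string such
    # as "h@@ e@@ ll@@ o".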

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
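
# Minimal usage sketch (hypothetical local paths; assumes a vocab.json/merges.txt
# pair in the CTRL format exists on disk):
#     tokenizer = CTRLTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokens = tokenizer.tokenize("Opinion Is this a good idea?")
#     text = tokenizer.convert_tokens_to_string(tokens)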
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pt1, pt2, rows, cols):
    """Warp img with the affine transform that maps the three points pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
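
# For example (illustrative): passing the same three points for pt1 and pt2 yields
# the identity transform, so get_rotation(img, pts, pts, rows, cols) returns img
# unchanged up to interpolation.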

if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()