fredzzp committed on
Commit
a657eab
·
verified ·
1 Parent(s): 612b51f

Initial model upload with self-contained custom code

Browse files
Files changed (1) hide show
  1. modeling_qwen2.py +0 -5
modeling_qwen2.py CHANGED
@@ -470,10 +470,6 @@ class Qwen2RotaryEmbedding(nn.Module):
470
  sin = sin * self.attention_scaling
471
  return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
472
 
473
- @add_start_docstrings(
474
- "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
475
- QWEN2_START_DOCSTRING,
476
- )
477
  class Qwen2PreTrainedModel(PreTrainedModel):
478
  # ... (class unchanged)
479
  config_class = Qwen2Config
@@ -654,7 +650,6 @@ class Qwen2ForCausalLM(Qwen2PreTrainedModel, MDMGenerationMixin):
654
  def get_decoder(self):
655
  return self.model
656
 
657
- @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
658
  @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
659
  def forward(
660
  self,
 
470
  sin = sin * self.attention_scaling
471
  return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
472
 
 
 
 
 
473
  class Qwen2PreTrainedModel(PreTrainedModel):
474
  # ... (class unchanged)
475
  config_class = Qwen2Config
 
650
  def get_decoder(self):
651
  return self.model
652
 
 
653
  @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
654
  def forward(
655
  self,