Update modeling_gpt_refact.py
modeling_gpt_refact.py  CHANGED  (+0 -6)

@@ -372,9 +372,6 @@ class GPTRefactModel(GPTRefactPreTrainedModel):
     def get_input_embeddings(self):
         return self.wte
 
-    def set_input_embeddings(self, new_embeddings):
-        self.wte = new_embeddings
-
     def forward(
             self,
             input_ids: Optional[torch.Tensor] = None,
@@ -518,9 +515,6 @@ class GPTRefactForCausalLM(GPTRefactPreTrainedModel):
     def get_output_embeddings(self):
         return self.lm_head
 
-    def set_output_embeddings(self, new_embeddings):
-        self.lm_head = new_embeddings
-
     def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
         if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
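Note on the diff above: with set_input_embeddings and set_output_embeddings removed, embeddings can no longer be swapped through those accessors on these classes. The sketch below is a minimal illustration of replacing the modules by direct attribute assignment instead; it assumes only the attribute names visible in the diff (wte on GPTRefactModel, lm_head on GPTRefactForCausalLM), and the helper name and sizes are hypothetical, not part of this repository.

import torch.nn as nn

def swap_embeddings(base_model, lm_model, vocab_size: int, hidden_size: int):
    # Hypothetical helper: with the setter methods gone, the embedding matrix
    # and LM head can still be replaced by assigning the attributes directly.
    base_model.wte = nn.Embedding(vocab_size, hidden_size)               # input embeddings
    lm_model.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)    # output projection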