Tonic committed on
Commit c5465f9 · unverified · 1 Parent(s): 62bb2f0

revert to autoconfig

Files changed (1)
  1. tasks/text.py +13 -9
tasks/text.py CHANGED
@@ -3,7 +3,7 @@ from datetime import datetime
 from datasets import load_dataset
 from sklearn.metrics import accuracy_score
 import torch
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoConfig
 from torch.utils.data import DataLoader
 from transformers import DataCollatorWithPadding
 
@@ -53,6 +53,7 @@ async def evaluate_text(request: TextEvaluationRequest):
     # MODEL INFERENCE CODE
     #--------------------------------------------------------------------------------------------
 
+
     try:
         # Set device
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -64,20 +65,22 @@ async def evaluate_text(request: TextEvaluationRequest):
         # Initialize tokenizer
         tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)
 
-        # Initialize model with specific configuration
+        # Load and modify config
+        config = AutoConfig.from_pretrained(path_model)
+        config.norm_bias = False
+        config.classifier_bias = False
+        config.attention_bias = False
+        config.mlp_bias = False
+
+        # Initialize model with modified config
         model = AutoModelForSequenceClassification.from_pretrained(
             path_model,
+            config=config,
            trust_remote_code=True,
            num_labels=8,
            problem_type="single_label_classification",
            ignore_mismatched_sizes=True,
-            torch_dtype=torch.float16,
-            config_overrides={
-                "norm_bias": None,  # Remove bias parameter
-                "classifier_bias": None,
-                "attention_bias": None,
-                "mlp_bias": None
-            }
+            torch_dtype=torch.float16
         ).to(device)
 
         # Set model to evaluation mode
@@ -133,6 +136,7 @@ async def evaluate_text(request: TextEvaluationRequest):
         print(f"Error during model inference: {str(e)}")
         raise
 
+
     #--------------------------------------------------------------------------------------------
     # MODEL INFERENCE ENDS HERE
     #--------------------------------------------------------------------------------------------
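For reference, the change above stops passing a config_overrides dict into from_pretrained and instead loads the configuration with AutoConfig, flips the bias flags on that object, and hands it back through config=. Below is a minimal standalone sketch of that loading path, not the endpoint itself: the checkpoint id is a placeholder, path_model/path_tokenizer stand in for whatever the task derives from the request, and the norm_bias/classifier_bias/attention_bias/mlp_bias attributes only have an effect if the remote model's config actually defines them.

    # Sketch of the post-commit model-loading path (placeholder ids, assumptions noted above)
    import torch
    from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

    path_model = "your-org/your-model"   # placeholder checkpoint id
    path_tokenizer = path_model          # the task loads tokenizer and model from separate paths

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    tokenizer = AutoTokenizer.from_pretrained(path_tokenizer)

    # Load the config first, then set the bias-related flags on the config object
    # rather than passing a config_overrides dict to from_pretrained.
    config = AutoConfig.from_pretrained(path_model)
    config.norm_bias = False
    config.classifier_bias = False
    config.attention_bias = False
    config.mlp_bias = False

    model = AutoModelForSequenceClassification.from_pretrained(
        path_model,
        config=config,
        trust_remote_code=True,
        num_labels=8,
        problem_type="single_label_classification",
        ignore_mismatched_sizes=True,
        torch_dtype=torch.float16,
    ).to(device)
    model.eval()

Mutating an AutoConfig instance and passing it to from_pretrained is the conventional way to adjust configuration flags in transformers, which is presumably what the commit message "revert to autoconfig" refers to.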