# train.py
import pickle
import os
import glob
import re
import random
import argparse
from importlib import import_module
from pathlib import Path

import numpy as np
import pandas as pd
import torch
import wandb
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold, train_test_split
from transformers import (
    AutoTokenizer,
    AutoModel,
    AutoConfig,
    BertForSequenceClassification,
    BertConfig,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback,
)
from load_data import *

def seed_everything(seed):
    """Fix every relevant RNG so runs are reproducible."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if using multi-GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)

def increment_output_dir(output_path, exist_ok=False):
    """Return output_path as-is if it is free, otherwise append the next run index."""
    path = Path(output_path)
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}*")
        matches = [re.search(rf"{path.stem}(\d+)", d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]
        n = max(i) + 1 if i else 2
        return f"{path}{n}"
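
# Expected behaviour (a sketch, assuming './results/expr' already exists):
# increment_output_dir('./results/expr')  -> './results/expr2'
# increment_output_dir('./results/expr')  -> './results/expr3'  (once expr2 exists)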

# Metrics function used for evaluation.
def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # calculate accuracy using sklearn's function
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
    }
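
# Quick sanity check (a sketch; the Trainer actually passes an EvalPrediction):
# from types import SimpleNamespace
# _pred = SimpleNamespace(label_ids=np.array([0, 1]),
#                         predictions=np.array([[0.9, 0.1], [0.2, 0.8]]))
# assert compute_metrics(_pred) == {'accuracy': 1.0}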

def train(args):
    seed_everything(args.seed)

    # set wandb; seed the config from the CLI arguments so plain runs and
    # wandb sweeps (which override wandb.config) share a single source of truth
    hyperparameter_defaults = dict(
        # dropout = 0.1,
        model_type=args.model_type,
        pretrained_model=args.pretrained_model,
        seed=args.seed,
        epochs=args.epochs,
        batch_size=args.batch_size,
        lr=args.lr,
        output_dir=args.output_dir,
        max_length=args.max_length,
        # model_name = 'BertForSequenceClassification',
        # tokenizer_name = 'BertTokenizer',
        # smoothing = 0.2
    )
    wandb.init(config=hyperparameter_defaults, project="sweep-test")
    wandb_config = wandb.config
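
    # During a sweep, wandb.config overrides the defaults above. A minimal
    # sweep definition sketch (an assumption, not shipped with this repo):
    #   method: bayes
    #   metric: {name: eval/loss, goal: minimize}
    #   parameters:
    #     lr: {min: 0.00001, max: 0.0001}
    #     batch_size: {values: [32, 64, 100]}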

    # load model and tokenizer
    MODEL_NAME = args.pretrained_model
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

    # load dataset
    # train_dataset = load_data("/opt/ml/input/data/train/train.tsv")
    whole_dataset = load_data("/opt/ml/input/data/train/all.tsv")
    whole_label = whole_dataset['label'].values
    # stratify on the labels so all 42 classes keep their proportions in both splits
    train_dataset, val_dataset = train_test_split(
        whole_dataset, test_size=0.1, stratify=whole_label, random_state=args.seed)

    # tokenizing dataset
    tokenized_train = tokenized_dataset(train_dataset, tokenizer, wandb_config.max_length)
    tokenized_val = tokenized_dataset(val_dataset, tokenizer, wandb_config.max_length)

    # make dataset for pytorch.
    RE_train_dataset = RE_Dataset(tokenized_train, train_dataset['label'].values)
    RE_val_dataset = RE_Dataset(tokenized_val, val_dataset['label'].values)
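
    # RE_Dataset and tokenized_dataset come from load_data.py. A minimal sketch
    # of the interface RE_Dataset is assumed to provide (not the actual code):
    # class RE_Dataset(torch.utils.data.Dataset):
    #     def __init__(self, encodings, labels):
    #         self.encodings, self.labels = encodings, labels
    #     def __getitem__(self, idx):
    #         item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
    #         item['labels'] = torch.tensor(self.labels[idx])
    #         return item
    #     def __len__(self):
    #         return len(self.labels)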

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # setting model hyperparameters: resolve the config/model classes from the
    # --model_type prefix (e.g. 'Bert' -> BertConfig / BertForSequenceClassification)
    config_module = getattr(import_module("transformers"), args.model_type + "Config")
    model_config = config_module.from_pretrained(MODEL_NAME)
    # model_config = AutoConfig.from_pretrained(MODEL_NAME)
    model_config.num_labels = 42
    model_module = getattr(import_module("transformers"), args.model_type + "ForSequenceClassification")
    model = model_module.from_pretrained(MODEL_NAME, config=model_config)
    # model = AutoModel.from_pretrained(MODEL_NAME, config=model_config)
    model.to(device)
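
    # Example (a sketch): with --model_type Electra and an ELECTRA checkpoint,
    # the same two getattr calls would resolve to ElectraConfig and
    # ElectraForSequenceClassification with no further code changes.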

    output_dir = increment_output_dir(args.output_dir)
    print(f"output_dir : {output_dir}")

    # Besides the options used here, many more are available:
    # see https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments
    training_args = TrainingArguments(
        output_dir=output_dir,                   # output directory
        save_total_limit=args.save_total_limit,  # max number of saved checkpoints
        save_steps=500,                          # checkpoint saving step
        num_train_epochs=wandb_config.epochs,    # total number of training epochs
        # save_strategy='epoch',                 # also save at the end of each epoch
        fp16=True,
        dataloader_num_workers=4,
        label_smoothing_factor=0.5,
        learning_rate=wandb_config.lr,           # learning rate
        per_device_train_batch_size=wandb_config.batch_size,  # batch size per device during training
        # per_device_eval_batch_size=16,         # batch size for evaluation
        warmup_steps=500,                        # number of warmup steps for the learning rate scheduler
        weight_decay=0.01,                       # strength of weight decay
        logging_dir='./logs',                    # directory for storing logs
        logging_steps=100,                       # logging step
        evaluation_strategy='steps',             # evaluation strategy to adopt during training
                                                 # `no`: no evaluation during training
                                                 # `steps`: evaluate every `eval_steps`
                                                 # `epoch`: evaluate at the end of each epoch
        load_best_model_at_end=True,
        metric_for_best_model='loss',
        eval_steps=500,                          # evaluation step
    )
    early_stopping = EarlyStoppingCallback(early_stopping_patience=5, early_stopping_threshold=0.001)

    trainer = Trainer(
        model=model,                     # the instantiated 🤗 Transformers model to be trained
        args=training_args,              # training arguments, defined above
        callbacks=[early_stopping],
        train_dataset=RE_train_dataset,  # training dataset
        eval_dataset=RE_val_dataset,     # evaluation dataset
        compute_metrics=compute_metrics  # metrics function defined above
    )

    # train model
    trainer.train()

    # save last model
    # trainer.save_model(output_dir)
    # trainer.save_state()

def main(args):
    train(args)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', type=str, default='Bert')
    parser.add_argument('--pretrained_model', type=str, default='bert-base-multilingual-cased')
    parser.add_argument('--seed', type=int, default=1331)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=5e-5)
    parser.add_argument('--output_dir', type=str, default='./results/expr')
    parser.add_argument('--save_total_limit', type=int, default=3)
    parser.add_argument('--max_length', type=int, default=200)
    args = parser.parse_args()

    main(args)
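
# Example invocation (a sketch; the training data path is hard-coded above, so
# the machine must provide /opt/ml/input/data/train/all.tsv):
# python train.py --model_type Bert \
#     --pretrained_model bert-base-multilingual-cased \
#     --epochs 5 --batch_size 32 --lr 5e-5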