import logging
import os
import sys
import torch
from utils import factory
from utils.data_manager import DataManager
from utils.toolkit import count_parameters


def train(args):
    # A single specified seed/device is used here; the multi-seed loop is kept
    # below (commented out) for reference.
    # seed_list = copy.deepcopy(args["seed"])
    # device = copy.deepcopy(args["device"])
    #
    # for seed in seed_list:
    #     args["seed"] = seed
    #     args["device"] = device
    #     result = _train(args)
    result = _train(args)
    return result


def _train(args):
    init_cls = 0 if args["init_cls"] == args["increment"] else args["init_cls"]
    # Embedding visualization is only enabled for the dedicated visualization config.
    isVisualization = args["file_name"] == "exper_visualization"

    # Log to both a per-run file and stdout.
    logs_name = "logs/{}/{}/{}/{}".format(
        args["cls_model"], args["dataset"], init_cls, args["increment"]
    )
    if not os.path.exists(logs_name):
        os.makedirs(logs_name)
    logfilename = "logs/{}/{}/{}/{}/{}_{}_{}".format(
        args["cls_model"],
        args["dataset"],
        init_cls,
        args["increment"],
        args["prefix"],
        args["seed"],
        args["backbone_type"],
    )
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(filename)s] => %(message)s",
        handlers=[
            logging.FileHandler(filename=logfilename + ".log"),
            logging.StreamHandler(sys.stdout),
        ],
    )

    _set_random(args)
    _set_device(args)
    print_args(args)

    data_manager = DataManager(
        args["dataset"],
        args["shuffle"],
        args["seed"],
        args["init_cls"],
        args["increment"],
        args,
    )
    model = factory.get_model(args["cls_model"], args)

    # Per-task accuracy curves for the CNN classifier and the NME classifier.
    cnn_curve, nme_curve = {"top1": [], "top5": []}, {"top1": [], "top5": []}
    logging.info("Round {}".format(args["round"]))
    for task in range(data_manager.nb_tasks):
        logging.info("Task %d", task)
        logging.info("All params: {}".format(count_parameters(model._network)))
        logging.info(
            "Trainable params: {}".format(count_parameters(model._network, True))
        )
        model.incremental_train(data_manager, args)
        cnn_accy, nme_accy = model.eval_task(visulization=isVisualization)
        model.after_task()

        if nme_accy is not None:
            logging.info("CNN: {}".format(cnn_accy["grouped"]))
            logging.info("NME: {}".format(nme_accy["grouped"]))

            cnn_curve["top1"].append(cnn_accy["top1"])
            cnn_curve["top5"].append(cnn_accy["top5"])
            nme_curve["top1"].append(nme_accy["top1"])
            nme_curve["top5"].append(nme_accy["top5"])

            logging.info("CNN top1 curve: {}".format(cnn_curve["top1"]))
            logging.info("CNN top5 curve: {}".format(cnn_curve["top5"]))
            logging.info("NME top1 curve: {}".format(nme_curve["top1"]))
            logging.info("NME top5 curve: {}\n".format(nme_curve["top5"]))

            print("Average Accuracy (CNN):", sum(cnn_curve["top1"]) / len(cnn_curve["top1"]))
            print("Average Accuracy (NME):", sum(nme_curve["top1"]) / len(nme_curve["top1"]))
            logging.info("Average Accuracy (CNN): {}".format(sum(cnn_curve["top1"]) / len(cnn_curve["top1"])))
            logging.info("Average Accuracy (NME): {}".format(sum(nme_curve["top1"]) / len(nme_curve["top1"])))
        else:
            logging.info("No NME accuracy.")
            logging.info("CNN: {}".format(cnn_accy["grouped"]))

            cnn_curve["top1"].append(cnn_accy["top1"])
            cnn_curve["top5"].append(cnn_accy["top5"])

            logging.info("CNN top1 curve: {}".format(cnn_curve["top1"]))
            logging.info("CNN top5 curve: {}\n".format(cnn_curve["top5"]))

            print("Average Accuracy (CNN):", sum(cnn_curve["top1"]) / len(cnn_curve["top1"]))
            logging.info("Average Accuracy (CNN): {}".format(sum(cnn_curve["top1"]) / len(cnn_curve["top1"])))

    result = {"cnn_curve": cnn_curve}
    return result


def _set_device(args):
    # Map the configured device ids to torch.device objects; -1 selects the CPU.
    device_type = args["device"]
    gpus = []
    for device in device_type:
        if device == -1:
            device = torch.device("cpu")
        elif not isinstance(device, torch.device):
            device = torch.device("cuda:{}".format(device))
        gpus.append(device)
    args["device"] = gpus


def _set_random(args):
    # Seed PyTorch on the CPU and all visible GPUs for reproducibility.
    torch.manual_seed(args["seed"])
    torch.cuda.manual_seed(args["seed"])
    torch.cuda.manual_seed_all(args["seed"])
    # For fully deterministic cuDNN behavior, also uncomment:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False


def print_args(args):
    for key, value in args.items():
        logging.info("{}: {}".format(key, value))