 import sys
 import argparse
 import pprint
+import time
+
 import numpy as np
 import paddle
-import paddle.fluid as fluid
+import paddle.static as static
 
 from utils import paddle_utils
 from utils.config import cfg
-from utils.timer import Timer, calculate_eta
+from utils.timer import TimeAverager, calculate_eta
 from models.model_builder import build_model
 from models.model_builder import ModelPhase
 from reader import SegDataset
@@ -82,8 +84,8 @@ def evaluate(cfg,
              **kwargs):
     np.set_printoptions(precision=5, suppress=True)
 
-    startup_prog = fluid.Program()
-    test_prog = fluid.Program()
+    startup_prog = static.Program()
+    test_prog = static.Program()
     dataset = SegDataset(
         file_list=cfg.DATASET.VAL_FILE_LIST,
         mode=ModelPhase.EVAL,
@@ -109,17 +111,17 @@ def data_generator():
 
     # Get device environment
     if use_gpu:
-        places = fluid.cuda_places()
+        places = static.cuda_places()
     elif use_xpu:
         xpu_id = int(os.environ.get('FLAGS_selected_xpus', 0))
-        places = [fluid.XPUPlace(xpu_id)]
+        places = [paddle.XPUPlace(xpu_id)]
     else:
-        places = fluid.cpu_places()
+        places = static.cpu_places()
     place = places[0]
     dev_count = len(places)
     print("#Device count: {}".format(dev_count))
 
-    exe = fluid.Executor(place)
+    exe = static.Executor(place)
     exe.run(startup_prog)
 
     test_prog = test_prog.clone(for_test=True)
@@ -132,9 +134,9 @@ def data_generator():
     if ckpt_dir is not None:
         print('load test model:', ckpt_dir)
         try:
-            fluid.load(test_prog, os.path.join(ckpt_dir, 'model'), exe)
+            static.load(test_prog, os.path.join(ckpt_dir, 'model'), exe)
         except:
-            fluid.io.load_params(exe, ckpt_dir, main_program=test_prog)
+            paddle.fluid.io.load_params(exe, ckpt_dir, main_program=test_prog)
 
     # Use streaming confusion matrix to calculate mean_iou
     np.set_printoptions(
@@ -144,11 +146,13 @@ def data_generator():
     num_images = 0
     step = 0
     all_step = cfg.DATASET.TEST_TOTAL_IMAGES // cfg.BATCH_SIZE + 1
-    timer = Timer()
-    timer.start()
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    batch_start = time.time()
     data_loader.start()
     while True:
         try:
+            reader_cost_averager.record(time.time() - batch_start)
             step += 1
             loss, pred, grts, masks = exe.run(
                 test_prog, fetch_list=fetch_list, return_numpy=True)
@@ -160,15 +164,17 @@ def data_generator():
             _, iou = conf_mat.mean_iou()
             _, acc = conf_mat.accuracy()
 
-            speed = 1.0 / timer.elapsed_time()
-
+            batch_cost_averager.record(
+                time.time() - batch_start, num_samples=cfg.BATCH_SIZE)
+            batch_cost = batch_cost_averager.get_average()
+            reader_cost = reader_cost_averager.get_average()
+            eta = calculate_eta(all_step - step, batch_cost)
             print(
-                "[EVAL]step: {} loss: {:.5f} acc: {:.4f} IoU: {:.4f} step/sec: {:.2f} | ETA {}"
-                .format(step, loss, acc, iou, speed,
-                        calculate_eta(all_step - step, speed)))
-            timer.restart()
+                "[EVAL]step: {} loss: {:.5f} acc: {:.4f} IoU: {:.4f} batch_cost: {:.4f}, reader_cost: {:.5f} | ETA {}"
+                .format(step, loss, acc, iou, batch_cost, reader_cost, eta))
+            batch_start = time.time()
             sys.stdout.flush()
-        except fluid.core.EOFException:
+        except paddle.fluid.core.EOFException:
             break
 
     category_iou, avg_iou = conf_mat.mean_iou()
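
For reference, the new timing logic relies on the `TimeAverager` helper imported from `utils.timer`. Below is a minimal sketch of the interface the diff assumes: `record()` accumulating an elapsed interval (optionally with `num_samples`) and `get_average()` returning average seconds per recorded interval. The `reset()` method is an added convenience for illustration, and the repository's real class may track more statistics.

```python
# Sketch only: an assumed shape for utils.timer.TimeAverager, inferred from
# how it is called in this diff (record(elapsed, num_samples=...) and
# get_average()); the actual implementation may differ.
class TimeAverager(object):
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated timings."""
        self._total_time = 0.0
        self._total_samples = 0
        self._count = 0

    def record(self, usetime, num_samples=None):
        """Accumulate one measured interval, optionally with its sample count."""
        self._total_time += usetime
        self._count += 1
        if num_samples:
            self._total_samples += num_samples

    def get_average(self):
        """Average seconds per recorded interval (per evaluation step here)."""
        if self._count == 0:
            return 0.0
        return self._total_time / self._count
```

In the evaluation loop above, both averagers are fed wall-clock deltas measured from `batch_start`, which is reset at the end of every logged step, so `batch_cost` and `reader_cost` reflect running averages over the current run.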