Skip to content

Commit aec14ac

Browse files
committed
Add default arguments as JSON files, add flags to skip saving data during debugging and to reverse all videos in a dataset, and fix a TSN preprocessing bug
1 parent f676aa9 commit aec14ac

10 files changed

+253
-110
lines changed

Diff for: models/c3d/default_HMDB51_testing.json

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"save": 1, "dataset": "HMDB51", "inputDims": 16, "outputDims": 51, "seqLength": 1, "expName": "c3d_HMDB51", "clipLength": 16, "clipOffset": "random", "size": 112, "fName": "testlist", "numVids": 1530, "numClips": 1, "loadedDataset": "HMDB51"}

Diff for: models/c3d/default_HMDB51_training.json

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"save": 1, "dataset": "HMDB51", "inputDims": 16, "outputDims": 51, "seqLength": 1, "expName": "c3d_HMDB51", "clipLength": 16, "clipOffset": "random", "size": 112, "fName": "trainlist", "numVids": 3570, "numClips": 5, "batchSize": 10}

Diff for: models/i3d/default_HMDB51_training.json

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"save": 0, "dataset": "HMDB51", "inputDims": 64, "outputDims": 51, "seqLength": 1, "expName": "test", "size": 224, "fName": "trainlist", "numVids": 3570, "batchSize": 10, "gradClipVal": 100, "optChoice": "adam"}

Diff for: models/resnet/default_HMDB51_training.json

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"save": 0, "dataset": "HMDB51", "inputDims": 50, "outputDims": 51, "seqLength": 50, "expName": "test", "size": 224, "fName": "trainlist", "numVids": 3570, "freeze": 1, "lossType": "half_loss"}

Diff for: models/tsn/default_HMDB51_training.json

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
{"save": 0, "dataset": "HMDB51", "inputDims": 60, "outputDims": 51, "seqLength": 20, "expName": "test", "size": 224, "fName": "trainlist", "numVids": 3570, "batchSize": 2, "gradClipVal": 20, "optChoice": "momentum"}

Diff for: models/tsn/default_preprocessing.py

+20-9
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,23 @@ def preprocess_image(image, output_height, output_width, is_training=False,
6161
# END IF
6262

6363

64+
def extract_segments(input_data_tensor, num_segs, snippet_length, segment_length):
    """
    If the input data tensor has more frames than we need, split the clip into
    num_segs segments and extract snippet_length sequential frames from a random
    position inside each segment.
    Args:
        :input_data_tensor: Tensor of video frames, first dimension is frames
        :num_segs:          Number of segments to split the video into
        :snippet_length:    Number of sequential frames extracted per segment
        :segment_length:    Number of frames in each segment (total frames / num_segs)
    Return:
        Tensor of num_segs * snippet_length frames concatenated along the frame axis
    """
    snippets = []

    # For each segment the video is split into, randomly extract 'snippet_length' number of sequential frames within that segment
    for seg in range(num_segs):
        seg_start = seg * segment_length

        # tf.random_uniform requires minval < maxval. When a segment is exactly
        # snippet_length frames long the original upper bound equals the lower
        # bound and the op raises; clamp so that case deterministically selects
        # the first frame of the segment instead of crashing.
        seg_max = tf.maximum(seg_start + 1, (seg + 1) * segment_length - snippet_length)

        random_extract_index = tf.random_uniform(dtype=tf.int32, minval=seg_start, maxval=seg_max, shape=[1])[0]
        snippets.append(tf.gather(input_data_tensor, tf.range(random_extract_index, random_extract_index + snippet_length)))

    # END FOR

    return tf.concat(snippets, axis=0)
6481

6582
def preprocess(input_data_tensor, frames, height, width, channel, input_dims, output_dims, seq_length, size, label, istraining, video_step, num_segs = 3, input_alpha=1.0):
6683
"""
@@ -105,16 +122,10 @@ def preprocess(input_data_tensor, frames, height, width, channel, input_dims, ou
105122
frames = tf.shape(input_data_tensor)[0]
106123
segment_length = frames/num_segs
107124

108-
input_data_tensor_temp = []
109-
110-
# For each segment the video is split into, randomly extract 'snippet_length' number of sequential frames within that segment
111-
for seg in range(num_segs):
112-
random_extract_index = tf.random_uniform(dtype=tf.int32, minval=seg * segment_length, maxval= (seg+1)*segment_length - snippet_length, shape=np.asarray([1]))[0]
113-
input_data_tensor_temp.append(tf.gather(input_data_tensor, tf.range(random_extract_index, random_extract_index+snippet_length)))
114-
115-
# END FOR
125+
input_data_tensor = tf.cond(tf.equal(segment_length, snippet_length),
126+
lambda: input_data_tensor[:snippet_length*num_segs],
127+
lambda: extract_segments(input_data_tensor, num_segs, snippet_length, segment_length))
116128

117-
input_data_tensor = tf.concat(input_data_tensor_temp, axis=0)
118129

119130
input_data_tensor = tf.map_fn(lambda img: resize(img, 256, 340), input_data_tensor)
120131

Diff for: test.py

+74-35
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# Basic imports
22
import os
33
import time
4+
import sys
45
import argparse
56
import tensorflow as tf
67
import numpy as np
@@ -21,22 +22,35 @@
2122
from utils.logger import Logger
2223
from random import shuffle
2324
from utils.load_dataset_tfrecords import load_dataset
25+
from utils.argument_utils import read_json, assign_args
2426

2527

2628
parser = argparse.ArgumentParser()
2729

28-
# Model parameters
30+
# Argument loading
31+
32+
parser.add_argument('--argsFile', action= 'store', type=str, default='none',
33+
help= 'The name of the file that contains a model\'s arguments. Also requires --model.')
2934

3035
parser.add_argument('--model', action= 'store', required=True,
31-
help= 'Model architecture (c3d, i3d, tsn, resnet)')
36+
help= 'Model architecture (c3d, tsn, i3d, resnet)')
37+
38+
args_init = parser.parse_known_args()[0]
39+
model_name = args_init.model
40+
args_file = args_init.argsFile
41+
args_json = read_json(model_name, args_file)
42+
json_keys = args_json.keys()
43+
3244

33-
parser.add_argument('--inputDims', action='store', required=True, type=int,
45+
# Model parameters
46+
47+
parser.add_argument('--inputDims', action='store', required='inputDims' not in json_keys, type=int,
3448
help = 'Input Dimensions (Number of frames to pass as input to the model)')
3549

36-
parser.add_argument('--outputDims', action='store', required=True, type=int,
50+
parser.add_argument('--outputDims', action='store', required='outputDims' not in json_keys, type=int,
3751
help = 'Output Dimensions (Number of classes in dataset)')
3852

39-
parser.add_argument('--seqLength', action='store', required=True, type=int,
53+
parser.add_argument('--seqLength', action='store', required='seqLength' not in json_keys, type=int,
4054
help = 'Number of output frames expected from model')
4155

4256
parser.add_argument('--modelAlpha', action='store', type=float, default=1.,
@@ -59,12 +73,15 @@
5973

6074
# Experiment parameters
6175

62-
parser.add_argument('--dataset', action= 'store', required=True,
76+
parser.add_argument('--dataset', action= 'store', required='dataset' not in json_keys,
6377
help= 'Dataset (UCF101, HMDB51)')
6478

65-
parser.add_argument('--loadedDataset', action= 'store', required=True,
79+
parser.add_argument('--loadedDataset', action= 'store', required='loadedDataset' not in json_keys,
6680
help= 'Dataset (UCF101, HMDB51)')
6781

82+
parser.add_argument('--loadedPreproc', action= 'store', type=str, default='null',
83+
help= 'The preprocessing of the weights to be loaded.')
84+
6885
parser.add_argument('--numGpus', action= 'store', type=int, default=1,
6986
help = 'Number of Gpus used for calculation')
7087

@@ -80,13 +97,13 @@
8097
parser.add_argument('--loadedCheckpoint', action='store', type=int, default=-1,
8198
help = 'Specify the step of the saved model checkpoint that will be loaded for testing. Defaults to most recent checkpoint.')
8299

83-
parser.add_argument('--size', action='store', required=True, type=int,
100+
parser.add_argument('--size', action='store', required='size' not in json_keys, type=int,
84101
help = 'Input frame size')
85102

86-
parser.add_argument('--expName', action='store', required=True,
103+
parser.add_argument('--expName', action='store', required='expName' not in json_keys,
87104
help = 'Unique name of experiment being run')
88105

89-
parser.add_argument('--numVids', action='store', required=True, type=int,
106+
parser.add_argument('--numVids', action='store', required='numVids' not in json_keys, type=int,
90107
help = 'Number of videos to be used for testing')
91108

92109
parser.add_argument('--split', action='store', type=int, default=1,
@@ -95,7 +112,7 @@
95112
parser.add_argument('--baseDataPath', action='store', default='/z/dat',
96113
help = 'Path to datasets')
97114

98-
parser.add_argument('--fName', action='store', required=True,
115+
parser.add_argument('--fName', action='store', required='fName' not in json_keys,
99116
help = 'Which dataset list to use (trainlist, testlist, vallist)')
100117

101118
parser.add_argument('--clipLength', action='store', type=int, default=-1,
@@ -143,9 +160,18 @@
143160
parser.add_argument('--topk', action='store', type=int, default=3,
144161
help = 'Integer indication top k predictions made (Default 3)')
145162

163+
parser.add_argument('--save', action='store', type=int, default=1,
164+
help = 'Boolean indicating whether to save any metrics, logs, or results. Used for testing if the code runs.')
165+
166+
parser.add_argument('--reverse', action='store', type=int, default=0,
167+
help = 'Boolean indicating whether reverse videos and classify them as a new action class. 0 all videos are forward, 1 randomly reversed videos, 2 all videos are reversed')
168+
146169

147170
args = parser.parse_args()
148171

172+
args = assign_args(args, args_json, sys.argv)
173+
174+
149175
if args.verbose:
150176
print "Setup of current experiments"
151177
print "\n############################"
@@ -154,7 +180,12 @@
154180

155181
# END IF
156182

183+
loaded_preproc = args.loadedPreproc
184+
if loaded_preproc=='null':
185+
loaded_preproc = args.preprocMethod
186+
157187
model_name = args.model
188+
save_bool = args.save
158189

159190
model = models_import.create_model_object(modelName = model_name,
160191
inputAlpha = args.inputAlpha,
@@ -176,7 +207,7 @@
176207
verbose = args.verbose)
177208

178209

179-
def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_dataset, experiment_name, num_vids, split, base_data_path, f_name, load_model, return_layer, clip_length, video_offset, clip_offset, num_clips, clip_stride, metrics_method, batch_size, metrics_dir, loaded_checkpoint, verbose, gpu_list, preproc_method, random_init, avg_clips, use_softmax, preproc_debugging, topk):
210+
def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_dataset, experiment_name, num_vids, split, base_data_path, f_name, load_model, return_layer, clip_length, video_offset, clip_offset, num_clips, clip_stride, metrics_method, batch_size, metrics_dir, loaded_checkpoint, verbose, gpu_list, preproc_method, loaded_preproc, random_init, avg_clips, use_softmax, preproc_debugging, reverse, topk):
180211
"""
181212
Function used to test the performance and analyse a chosen model
182213
Args:
@@ -206,6 +237,7 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
206237
:verbose: Boolean to indicate if all print statements should be processed or not
207238
:gpu_list: List of GPU IDs to be used
208239
:preproc_method: The preprocessing method to use, default, cvr, rr, sr, or any other custom preprocessing
240+
:loaded_preproc: Name of preproc method which was used to train the current model
209241
:random_init: Randomly initialize model weights, not loading from any files (default False)
210242
:avg_clips: Binary boolean indicating whether to average predictions across clips
211243
:use_softmax: Binary boolean indicating whether to apply softmax to the inference of the model
@@ -226,7 +258,7 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
226258
# Load pre-trained/saved model
227259
if load_model:
228260
try:
229-
ckpt, gs_init, learning_rate_init = load_checkpoint(model.name, loaded_dataset, experiment_name, loaded_checkpoint, preproc_method)
261+
ckpt, gs_init, learning_rate_init = load_checkpoint(model.name, loaded_dataset, experiment_name, loaded_checkpoint, loaded_preproc)
230262
if verbose:
231263
print 'A better checkpoint is found. The global_step value is: ' + str(gs_init)
232264

@@ -262,7 +294,7 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
262294

263295
# Setting up tensors for models
264296
# input_data_tensor - [batchSize, inputDims, height, width, channels]
265-
input_data_tensor, labels_tensor, names_tensor = load_dataset(model, 1, batch_size, output_dims, input_dims, seq_length, size, data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, video_step, preproc_debugging, 0, verbose)
297+
input_data_tensor, labels_tensor, names_tensor = load_dataset(model, 1, batch_size, output_dims, input_dims, seq_length, size, data_path, dataset, istraining, clip_length, video_offset, clip_offset, num_clips, clip_stride, video_step, preproc_debugging, 0, verbose, reverse=reverse)
266298

267299
######### GPU list check block ####################
268300

@@ -303,28 +335,29 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
303335

304336
############################################################################################################################################
305337

338+
if save_bool:
339+
######################### Logger Setup block ######################################
306340

307-
######################### Logger Setup block ######################################
341+
# Logger setup (Name format: Date, month, hour, minute and second, with a prefix of exp_test)
342+
log_name = ("exp_test_%s_%s_%s_%s_%s" % ( time.strftime("%d_%m_%H_%M_%S"),
343+
dataset, preproc_method, experiment_name, metrics_method))
308344

309-
# Logger setup (Name format: Date, month, hour, minute and second, with a prefix of exp_test)
310-
log_name = ("exp_test_%s_%s_%s_%s_%s" % ( time.strftime("%d_%m_%H_%M_%S"),
311-
dataset, preproc_method, experiment_name, metrics_method))
345+
curr_logger = Logger(os.path.join('logs', model.name, dataset, preproc_method, metrics_dir, log_name))
346+
make_dir(os.path.join('results',model.name))
347+
make_dir(os.path.join('results',model.name, dataset))
348+
make_dir(os.path.join('results',model.name, dataset, preproc_method))
349+
make_dir(os.path.join('results',model.name, dataset, preproc_method, experiment_name))
350+
make_dir(os.path.join('results',model.name, dataset, preproc_method, experiment_name, metrics_dir))
312351

313-
curr_logger = Logger(os.path.join('logs', model.name, dataset, preproc_method, metrics_dir, log_name))
314-
make_dir(os.path.join('results',model.name))
315-
make_dir(os.path.join('results',model.name, dataset))
316-
make_dir(os.path.join('results',model.name, dataset, preproc_method))
317-
make_dir(os.path.join('results',model.name, dataset, preproc_method, experiment_name))
318-
make_dir(os.path.join('results',model.name, dataset, preproc_method, experiment_name, metrics_dir))
319-
320-
###################################################################################
352+
###################################################################################
321353

322354
# TF session setup
323355
#sess = tf.Session()
324356
init = (tf.global_variables_initializer(), tf.local_variables_initializer())
325357
coord = tf.train.Coordinator()
326358
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
327-
metrics = Metrics( output_dims, seq_length, curr_logger, metrics_method, istraining, model.name, experiment_name, preproc_method, dataset, metrics_dir, verbose=verbose, topk=topk)
359+
if save_bool:
360+
metrics = Metrics( output_dims, seq_length, curr_logger, metrics_method, istraining, model.name, experiment_name, preproc_method, dataset, metrics_dir, verbose=verbose, topk=topk)
328361

329362
# Variables get randomly initialized into tf graph
330363
sess.run(init)
@@ -375,7 +408,9 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
375408
break
376409

377410
count += 1
378-
metrics.log_prediction(labels[batch_idx][0], output_predictions[batch_idx], vid_name, count)
411+
412+
if save_bool:
413+
metrics.log_prediction(labels[batch_idx][0], output_predictions[batch_idx], vid_name, count)
379414

380415
# END IF
381416

@@ -388,15 +423,17 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
388423
coord.request_stop()
389424
coord.join(threads)
390425

391-
total_accuracy = metrics.total_classification()
392-
total_pred = metrics.get_predictions_array()
393426

394-
if verbose:
395-
print "Total accuracy : ", total_accuracy
396-
print total_pred
427+
if save_bool:
428+
total_accuracy = metrics.total_classification()
429+
total_pred = metrics.get_predictions_array()
430+
431+
if verbose:
432+
print "Total accuracy : ", total_accuracy
433+
print total_pred
397434

398-
# Save results in numpy format
399-
np.save(os.path.join('results', model.name, dataset, preproc_method, experiment_name, metrics_dir, 'test_predictions_'+dataset+"_"+metrics_method+'.npy'), np.array(total_pred))
435+
# Save results in numpy format
436+
np.save(os.path.join('results', model.name, dataset, preproc_method, experiment_name, metrics_dir, 'test_predictions_'+dataset+"_"+metrics_method+'.npy'), np.array(total_pred))
400437

401438

402439
if __name__=="__main__":
@@ -427,10 +464,12 @@ def test(model, input_dims, output_dims, seq_length, size, dataset, loaded_datas
427464
verbose = args.verbose,
428465
gpu_list = args.gpuList,
429466
preproc_method = args.preprocMethod,
467+
loaded_preproc = loaded_preproc,
430468
random_init = args.randomInit,
431469
avg_clips = args.avgClips,
432470
use_softmax = args.useSoftmax,
433471
preproc_debugging = args.preprocDebugging,
472+
reverse = args.reverse,
434473
topk = args.topk)
435474

436475
# END IF

0 commit comments

Comments
 (0)