23 | 23 | dict(type='MMRandomFlip', flip_ratio=0.5),
24 | 24 | dict(
25 | 25 | type='MMAutoAugment',
26 | | - policies=[[
27 | | - dict(
28 | | - type='MMResize',
29 | | - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
30 | | - (608, 1333), (640, 1333), (672, 1333), (704, 1333),
31 | | - (736, 1333), (768, 1333), (800, 1333)],
32 | | - multiscale_mode='value',
33 | | - keep_ratio=True)
34 | | - ],
35 | | - [
36 | | - dict(
37 | | - type='MMResize',
38 | | - img_scale=[(400, 1333), (500, 1333), (600, 1333)],
39 | | - multiscale_mode='value',
40 | | - keep_ratio=True),
41 | | - dict(
42 | | - type='MMRandomCrop',
43 | | - crop_type='absolute_range',
44 | | - crop_size=(384, 600),
45 | | - allow_negative_crop=True),
46 | | - dict(
47 | | - type='MMResize',
48 | | - img_scale=[(480, 1333), (512, 1333), (544, 1333),
49 | | - (576, 1333), (608, 1333), (640, 1333),
50 | | - (672, 1333), (704, 1333), (736, 1333),
51 | | - (768, 1333), (800, 1333)],
52 | | - multiscale_mode='value',
53 | | - override=True,
54 | | - keep_ratio=True)
55 | | - ]]),
| 26 | + policies=[ |
| 27 | + [ |
| 28 | + dict( |
| 29 | + type='MMResize', |
| 30 | + img_scale=[(480, 1333), (512, 1333), (544, 1333), |
| 31 | + (576, 1333), (608, 1333), (640, 1333), |
| 32 | + (672, 1333), (704, 1333), (736, 1333), |
| 33 | + (768, 1333), (800, 1333)], |
| 34 | + multiscale_mode='value', |
| 35 | + keep_ratio=True) |
| 36 | + ], |
| 37 | + [ |
| 38 | + dict( |
| 39 | + type='MMResize', |
| 40 | + # The aspect ratio of all images in the train dataset is < 7,
| 41 | + # follow the original impl |
| 42 | + img_scale=[(400, 4200), (500, 4200), (600, 4200)], |
| 43 | + multiscale_mode='value', |
| 44 | + keep_ratio=True), |
| 45 | + dict( |
| 46 | + type='MMRandomCrop', |
| 47 | + crop_type='absolute_range', |
| 48 | + crop_size=(384, 600), |
| 49 | + allow_negative_crop=True), |
| 50 | + dict( |
| 51 | + type='MMResize', |
| 52 | + img_scale=[(480, 1333), (512, 1333), (544, 1333), |
| 53 | + (576, 1333), (608, 1333), (640, 1333), |
| 54 | + (672, 1333), (704, 1333), (736, 1333), |
| 55 | + (768, 1333), (800, 1333)], |
| 56 | + multiscale_mode='value', |
| 57 | + override=True, |
| 58 | + keep_ratio=True) |
| 59 | + ] |
| 60 | + ]), |
56 | 61 | dict(type='MMNormalize', **img_norm_cfg),
57 | 62 | dict(type='MMPad', size_divisor=1),
58 | 63 | dict(type='DefaultFormatBundle'),
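The new `MMAutoAugment` block keeps the same two-branch recipe as before but widens the long edge of the intermediate resize to 4200, matching the original DETR implementation. Conceptually, each image either gets a single multi-scale resize, or a coarse resize followed by an absolute-range random crop and a second multi-scale resize. The sketch below illustrates that shape logic only; the helper names (`rescale_keep_ratio`, `random_crop_shape`, `augmented_shape`) and the mmdet-style keep-ratio rescale rule are illustrative assumptions, not EasyCV's API.

```python
import random

# Shape-only sketch of the two policy branches above (not EasyCV code):
# branch 1 is a single multi-scale resize; branch 2 resizes to a
# (400/500/600, 4200) scale, takes an absolute-range random crop,
# then resizes again to one of the final scales.
FINAL_SCALES = [(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                (736, 1333), (768, 1333), (800, 1333)]
INTERMEDIATE_SCALES = [(400, 4200), (500, 4200), (600, 4200)]
CROP_RANGE = (384, 600)  # crop_type='absolute_range'


def rescale_keep_ratio(h, w, scale):
    # keep_ratio rule: short edge <= min(scale), long edge <= max(scale)
    factor = min(max(scale) / max(h, w), min(scale) / min(h, w))
    return round(h * factor), round(w * factor)


def random_crop_shape(h, w, crop_range):
    lo, hi = crop_range
    ch = random.randint(lo, max(lo, min(hi, h)))
    cw = random.randint(lo, max(lo, min(hi, w)))
    return min(ch, h), min(cw, w)


def augmented_shape(h, w):
    if random.random() < 0.5:  # pick one of the two policies at random
        return rescale_keep_ratio(h, w, random.choice(FINAL_SCALES))
    h, w = rescale_keep_ratio(h, w, random.choice(INTERMEDIATE_SCALES))
    h, w = random_crop_shape(h, w, CROP_RANGE)
    return rescale_keep_ratio(h, w, random.choice(FINAL_SCALES))


print(augmented_shape(427, 640))  # e.g. a typical COCO image
```

The widened 4200 long edge only matters for very wide images, where the old 1333 cap would have forced the short edge below the nominal 400/500/600.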
96 | 101 | ],
97 | 102 | classes=CLASSES,
98 | 103 | test_mode=False,
99 | | - filter_empty_gt=True,
| 104 | + filter_empty_gt=False,
100 | 105 | iscrowd=False),
101 | 106 | pipeline=train_pipeline)
102 | 107 |
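Switching `filter_empty_gt` to `False` keeps images that have no ground-truth boxes in the training set, which is consistent with `allow_negative_crop=True` in the pipeline above (a crop may legitimately contain no boxes). The snippet below is only a sketch of what the flag toggles, with made-up names and data, not EasyCV's dataset code.

```python
# Illustrative only: whether images with zero ground-truth boxes survive
# into training when the dataset is built.
annotations = {
    'img_1.jpg': [{'bbox': [10, 20, 50, 80], 'label': 0}],
    'img_2.jpg': [],                                   # no boxes
    'img_3.jpg': [{'bbox': [5, 5, 30, 40], 'label': 2}],
}


def valid_images(anns, filter_empty_gt):
    if filter_empty_gt:
        return [name for name, boxes in anns.items() if boxes]
    return list(anns)                                  # keep everything


print(valid_images(annotations, filter_empty_gt=True))   # ['img_1.jpg', 'img_3.jpg']
print(valid_images(annotations, filter_empty_gt=False))  # all three images
```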
118 | 123 | pipeline=test_pipeline)
119 | 124 |
120 | 125 | data = dict(
121 | | - imgs_per_gpu=2, workers_per_gpu=2, train=train_dataset, val=val_dataset)
| 126 | + imgs_per_gpu=2, |
| 127 | + workers_per_gpu=2, |
| 128 | + train=train_dataset, |
| 129 | + val=val_dataset, |
| 130 | + drop_last=True) |
122 | 131 |
123 | 132 | # evaluation
124 | 133 | eval_config = dict(interval=1, gpu_collect=False)
125 | 134 | eval_pipelines = [
126 | 135 | dict(
127 | 136 | mode='test',
| 137 | + dist_eval=True,
128 | 138 | evaluators=[
129 | 139 | dict(type='CocoDetectionEvaluator', classes=CLASSES),
130 | 140 | ],
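Two of the remaining changes are worth spelling out: `drop_last=True` discards the final incomplete batch each epoch, and `dist_eval=True` presumably routes evaluation through the distributed code path. Assuming the `data` dict is ultimately consumed by a standard PyTorch `DataLoader` (an assumption about EasyCV internals), the effect of `drop_last` is:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(11).float())  # 11 samples, batch size 2

print(len(DataLoader(dataset, batch_size=2, drop_last=True)))   # 5 batches, trailing sample dropped
print(len(DataLoader(dataset, batch_size=2, drop_last=False)))  # 6 batches, last one has a single sample
```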