
Commit 1b78d83

Commit message: init (0 parents)

880 files changed: +77907 -0 lines changed


.dev_scripts/benchmark_filter.py

+127 lines

@@ -0,0 +1,127 @@
import argparse
import os
import os.path as osp

import mmcv


def parse_args():
    parser = argparse.ArgumentParser(description='Filter configs to train')
    parser.add_argument(
        '--basic-arch',
        action='store_true',
        help='to train models in basic arch')
    parser.add_argument(
        '--datasets', action='store_true', help='to train models in dataset')
    parser.add_argument(
        '--data-pipeline',
        action='store_true',
        help='to train models related to data pipeline, e.g. augmentations')
    parser.add_argument(
        '--nn-module',
        action='store_true',
        help='to train models related to neural network modules')

    args = parser.parse_args()
    return args


basic_arch_root = [
    'cascade_rcnn', 'double_heads', 'fcos', 'foveabox', 'free_anchor',
    'grid_rcnn', 'guided_anchoring', 'htc', 'libra_rcnn', 'atss', 'mask_rcnn',
    'ms_rcnn', 'nas_fpn', 'reppoints', 'retinanet', 'ssd', 'gn', 'ghm', 'fsaf',
    'point_rend', 'nas_fcos', 'pisa', 'dynamic_rcnn'
]

datasets_root = ['wider_face', 'pascal_voc', 'cityscapes', 'mask_rcnn']

data_pipeline_root = [
    'albu_example', 'instaboost', 'ssd', 'mask_rcnn', 'nas_fpn'
]

nn_module_root = [
    'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn+ws', 'hrnet', 'pafpn',
    'nas_fpn', 'regnet'
]

benchmark_pool = [
    'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
    'configs/htc/htc_r50_fpn_1x_coco.py',
    'configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py',
    'configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py',
    'configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py',
    'configs/regnet/mask_rcnn_regnetx-3GF_fpn_1x_coco.py',
    'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
    'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
    'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
    'configs/rpn/rpn_r50_fpn_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
    'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
    'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
    'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
    'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
    'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
    'configs/ssd/ssd300_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
    'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py',  # noqa
    'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
    'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
    'configs/fsaf/fsaf_r50_fpn_1x_coco.py',
    'configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py',
    'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
    'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
    'configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py',
    'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
    'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
    'configs/wider_face/ssd300_wider_face.py',
    'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
    'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
    'configs/atss/atss_r50_fpn_1x_coco.py',
    'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
    'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
    'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
    'configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
    'configs/pascal_voc/ssd300_voc0712.py',
    'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
    'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
    'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
]


def main():
    args = parse_args()

    benchmark_type = []
    if args.basic_arch:
        benchmark_type += basic_arch_root
    if args.datasets:
        benchmark_type += datasets_root
    if args.data_pipeline:
        benchmark_type += data_pipeline_root
    if args.nn_module:
        benchmark_type += nn_module_root

    config_dpath = 'configs/'
    benchmark_configs = []
    for cfg_root in benchmark_type:
        cfg_dir = osp.join(config_dpath, cfg_root)
        configs = os.scandir(cfg_dir)
        for cfg in configs:
            config_path = osp.join(cfg_dir, cfg.name)
            if (config_path in benchmark_pool
                    and config_path not in benchmark_configs):
                benchmark_configs.append(config_path)

    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    config_dicts = dict(models=benchmark_configs)
    mmcv.dump(config_dicts, 'regression_test_configs.json')


if __name__ == '__main__':
    main()
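
A brief, hedged aside (not part of the commit): the JSON written above can be read back with mmcv to drive whatever job actually launches the benchmarks. The loop below is purely illustrative.

    # Illustrative only: load the filter output and iterate over the
    # selected configs; mmcv.load infers the format from the .json suffix.
    import mmcv

    selected = mmcv.load('regression_test_configs.json')
    for config in selected['models']:
        print(f'would benchmark: {config}')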

.dev_scripts/gather_models.py

+171 lines

@@ -0,0 +1,171 @@
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess

import mmcv
import torch

# build schedule look-up table to automatically find the final model
SCHEDULES_LUT = {
    '_1x_': 12,
    '_2x_': 24,
    '_20e_': 20,
    '_3x_': 36,
    '_4x_': 48,
    '_24e_': 24,
    '_6x_': 73
}
RESULTS_LUT = ['bbox_mAP', 'segm_mAP']


def process_checkpoint(in_file, out_file):
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8])
    subprocess.Popen(['mv', out_file, final_file])
    return final_file


def get_final_epoch(config):
    if config.find('grid_rcnn') != -1 and config.find('2x') != -1:
        # grid_rcnn 2x trains 25 epochs
        return 25

    for schedule_name, epoch_num in SCHEDULES_LUT.items():
        if config.find(schedule_name) != -1:
            return epoch_num


def get_final_results(log_json_path, epoch):
    result_dict = dict()
    with open(log_json_path, 'r') as f:
        for line in f.readlines():
            log_line = json.loads(line)
            if 'mode' not in log_line.keys():
                continue

            if log_line['mode'] == 'train' and log_line['epoch'] == epoch:
                result_dict['memory'] = log_line['memory']

            if log_line['mode'] == 'val' and log_line['epoch'] == epoch:
                result_dict.update({
                    key: log_line[key]
                    for key in RESULTS_LUT if key in log_line
                })
    return result_dict


def parse_args():
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'out', type=str, help='output path of gathered models to be stored')

    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the exps is finished
        final_epoch = get_final_epoch(used_config)
        final_model = 'epoch_{}.pth'.format(final_epoch)
        model_path = osp.join(exp_dir, final_model)

        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get logs
        log_json_path = glob.glob(osp.join(exp_dir, '*.log.json'))[0]
        log_txt_path = glob.glob(osp.join(exp_dir, '*.log'))[0]
        model_performance = get_final_results(log_json_path, final_epoch)

        if model_performance is None:
            continue

        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                log_json_path=osp.split(log_json_path)[-1]))

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]

        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      'epoch_{}.pth'.format(model['epochs']))

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_cconfig_path = osp.split(config_path)[-1]
        shutil.copy(config_path,
                    osp.join(model_publish_dir, target_cconfig_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))


if __name__ == '__main__':
    main()
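
A hedged aside (not part of the commit): the schedule look-up resolves the final checkpoint purely by substring matching on the config filename. The sketch below assumes the script has been made importable as a module named gather_models; the expected mappings follow from SCHEDULES_LUT and the grid_rcnn special case above.

    # Minimal sketch: SCHEDULES_LUT keys are matched as substrings of the
    # config name to decide which epoch_N.pth counts as the final checkpoint.
    from gather_models import get_final_epoch

    assert get_final_epoch('retinanet_r50_fpn_1x_coco.py') == 12  # '_1x_'
    assert get_final_epoch('mask_rcnn_r50_fpn_2x_coco.py') == 24  # '_2x_'
    assert get_final_epoch('grid_rcnn_r50_fpn_gn-head_2x_coco.py') == 25  # special case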

.dev_scripts/linter.sh

+3 lines

@@ -0,0 +1,3 @@
yapf -r -i --style .style.yapf mmdet/ configs/ tests/ tools/
isort -rc mmdet/ configs/ tests/ tools/
flake8 .

.github/CODE_OF_CONDUCT.md

+76 lines

@@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
