Skip to content

Commit 93d3221

Browse files
author
dihehuang
committed
remove unused code
1 parent e1a3261 commit 93d3221

25 files changed

+41
-1829
lines changed
+3-9
Original file line numberDiff line numberDiff line change
@@ -1,25 +1,19 @@
11
from configs.data.base import cfg
22

3-
# TEST_BASE_PATH = "assets/megadepth_test_1500_scene_info" # "./datasets/megadepth/index" # "assets/megadepth_test_1500_scene_info"
3+
44
TEST_BASE_PATH = "./datasets/megadepth_scale_data/scale_data_0125"
55

66
cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
77
cfg.DATASET.TEST_DATA_ROOT = "./datasets/megadepth/test"
8-
cfg.DATASET.TEST_NPZ_ROOT = (
9-
f"{TEST_BASE_PATH}" # f"{TEST_BASE_PATH}/scene_info_0.1_0.7" # f"{TEST_BASE_PATH}"
10-
)
11-
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/megadepth_test_1500.txt" # f"assets/megadepth_test_1500_scene_info/megadepth_test_1500.txt" # f"{TEST_BASE_PATH}/megadepth_test_1500.txt"
8+
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
9+
1210
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_45.txt"
1311
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_34.txt"
1412
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_23.txt"
1513
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_23.txt"
1614
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_25.txt"
1715
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_15.txt"
18-
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/test_13.txt"
19-
20-
# cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/megadepth_test_1500_scale.txt"
2116

22-
# cfg.TRAINER.N_SAMPLES_PER_SUBSET = 100
2317

2418
cfg.DATASET.MGDPT_IMG_RESIZE = 832
2519
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0

configs/data/megadepth_trainval_832.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -8,19 +8,19 @@
88
cfg.DATASET.TRAIN_LIST_PATH = f"{TRAIN_BASE_PATH}/trainvaltest_list/train_list3.txt"
99
cfg.DATASET.MIN_OVERLAP_SCORE_TRAIN = 0.0
1010

11-
TEST_BASE_PATH = "./datasets/megadepth/index"
11+
TEST_BASE_PATH = "./datasets/megadepth_scale_data/scale_data_0125"
1212
cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
1313
cfg.DATASET.VAL_DATA_ROOT = cfg.DATASET.TEST_DATA_ROOT = "./datasets/megadepth/test"
1414
cfg.DATASET.VAL_NPZ_ROOT = (
1515
cfg.DATASET.TEST_NPZ_ROOT
16-
) = f"{TEST_BASE_PATH}/scene_info_val_1500"
16+
) = f"{TEST_BASE_PATH}"
1717
cfg.DATASET.VAL_LIST_PATH = (
1818
cfg.DATASET.TEST_LIST_PATH
19-
) = f"{TEST_BASE_PATH}/trainvaltest_list/val_list.txt"
19+
) = f"{TEST_BASE_PATH}/test_15.txt"
2020
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
2121

2222
# 368 scenes in total for MegaDepth
2323
# (with difficulty balanced (further split each scene to 3 sub-scenes))
24-
cfg.TRAINER.N_SAMPLES_PER_SUBSET = 100 # 100
24+
cfg.TRAINER.N_SAMPLES_PER_SUBSET = 100
2525

26-
cfg.DATASET.MGDPT_IMG_RESIZE = 832 # for training on 11GB mem GPUs
26+
cfg.DATASET.MGDPT_IMG_RESIZE = 832

scripts/reproduce_test/googleurban_0.1.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ PROJECT_DIR="${SCRIPTPATH}/../../"
77
export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

10-
# data_cfg_path="configs/data/megadepth_test_1500.py"
10+
# data_cfg_path="configs/data/megadepth_test_4000.py"
1111
data_cfg_path="configs/data/googleurban_0.1_val.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense.py"
1313
ckpt_path="./weights/adamatcher.ckpt"

scripts/reproduce_test/outdoor_ada_scale.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ PROJECT_DIR="${SCRIPTPATH}/../../"
77
export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

10-
# data_cfg_path="configs/data/megadepth_test_1500.py"
10+
# data_cfg_path="configs/data/megadepth_test_4000.py"
1111
data_cfg_path="configs/data/megadepth_test_scale_1000.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense.py"
1313
ckpt_path="weights/adamatcher.ckpt"

scripts/reproduce_test/phototourism_0.1.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ PROJECT_DIR="${SCRIPTPATH}/../../"
77
export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

10-
# data_cfg_path="configs/data/megadepth_test_1500.py"
10+
# data_cfg_path="configs/data/megadepth_test_4000.py"
1111
data_cfg_path="configs/data/phototourism_0.1_val.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense.py"
1313
ckpt_path="weights/adamatcher"

scripts/reproduce_test/pragueparks_0.1.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ PROJECT_DIR="${SCRIPTPATH}/../../"
77
export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

10-
# data_cfg_path="configs/data/megadepth_test_1500.py"
10+
# data_cfg_path="configs/data/megadepth_test_4000.py"
1111
data_cfg_path="configs/data/pragueparks_0.1_val.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense.py"
1313
ckpt_path="weights/adamatcher.ckpt"

scripts/reproduce_test/yfcc100m.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ PROJECT_DIR="${SCRIPTPATH}/../../"
77
export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

10-
# data_cfg_path="configs/data/megadepth_test_1500.py"
10+
# data_cfg_path="configs/data/megadepth_test_4000.py"
1111
data_cfg_path="configs/data/yfcc100m.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense_yfcc.py"
1313

scripts/reproduce_train/32gpus_1bs_832.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,15 @@ export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH
88
cd $PROJECT_DIR
99

1010
TRAIN_IMG_SIZE=832
11-
data_cfg_path="configs/data/megadepth_all_trainval_${TRAIN_IMG_SIZE}.py"
11+
data_cfg_path="configs/data/megadepth_trainval_${TRAIN_IMG_SIZE}.py"
1212
main_cfg_path="configs/loftr/outdoor/loftr_ds_dense.py"
1313

1414
n_nodes=4
1515
n_gpus_per_node=8 # 1 4 8
1616
torch_num_workers=8 # 1 4 8
1717
batch_size=1
1818
pin_memory=true
19-
exp_name="resnet_scc_c_scc_2500-${TRAIN_IMG_SIZE}-bs$(($n_gpus_per_node * $n_nodes * $batch_size))"
19+
exp_name="AdaMatcher-${TRAIN_IMG_SIZE}-bs$(($n_gpus_per_node * $n_nodes * $batch_size))"
2020

2121
python3 -u ./train.py \
2222
${data_cfg_path} \
@@ -30,4 +30,4 @@ python3 -u ./train.py \
3030
--limit_val_batches=1. \
3131
--num_sanity_val_steps=10 \
3232
--benchmark=True \
33-
--max_epochs=30 >> ./OUTPUT/resnet_scc_c_scc_2500.txt
33+
--max_epochs=30 >> ./OUTPUT/AdaMatcher.txt

src/adamatcher/ada_module/linear_attention.py

-51
Original file line numberDiff line numberDiff line change
@@ -103,57 +103,6 @@ def forward(self, q, k, v, q_mask=None, kv_mask=None):
103103
return message
104104

105105

106-
class SimAttention(Module):
107-
def __init__(self, eps=1e-6):
108-
super().__init__()
109-
self.feature_map = elu_feature_map
110-
self.eps = eps
111-
112-
self.linear = torch.nn.Linear(2, 1)
113-
self.sigmoid = torch.nn.Sigmoid()
114-
115-
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
116-
"""Multi-Head linear attention proposed in "Transformers are RNNs"
117-
Args:
118-
queries: [N, L, H, D]
119-
keys: [N, S, H, D]
120-
values: [N, S, H, D]
121-
q_mask: [N, L]
122-
kv_mask: [N, S]
123-
Returns:
124-
queried_values: (N, L, H, D)
125-
"""
126-
# pdb.set_trace()
127-
Q = self.feature_map(queries)
128-
K = self.feature_map(keys)
129-
# Q = queries
130-
# K = keys
131-
132-
# set padded position to zero
133-
if q_mask is not None:
134-
Q = Q * q_mask[:, :, None, None]
135-
queries = queries * q_mask[:, :, None, None]
136-
if kv_mask is not None:
137-
K = K * kv_mask[:, :, None, None]
138-
values = values * kv_mask[:, :, None, None]
139-
140-
# v_length = values.size(1)
141-
# values = values / v_length # prevent fp16 overflow
142-
# KV = torch.einsum("nshd,nshv->nhdv", K, values) # (S,D)' @ S,V
143-
# Z = 1 / (torch.einsum("nlhd,nhd->nlh", Q, K.sum(dim=1)) + self.eps)
144-
# queried_values = torch.einsum("nlhd,nhdv,nlh->nlhv", Q, KV, Z) * v_length
145-
146-
qk = torch.einsum('nlhd,nshd->nlsh', Q, K) # [n,l,s,h]
147-
qk_mean = torch.mean(qk, dim=2, keepdim=True) # [n,l,1,h]
148-
qk_max, _ = torch.max(qk, dim=2, keepdim=True) # [n,l,1,h]
149-
atten = torch.cat([qk_mean, qk_max], dim=2).permute(0, 1, 3,
150-
2) # [n,l,h,2]
151-
atten = self.sigmoid(self.linear(atten).squeeze(-1)) # [n,l,h]
152-
queried_values = torch.einsum('nlhd,nlh->nlhd', queries, atten)
153-
# queried_values = torch.einsum('nlhd,nlh->nlhd', Q, atten)
154-
return queried_values.contiguous()
155-
156-
157106
class FullAttention(Module):
158107
def __init__(self, use_dropout=False, attention_dropout=0.1):
159108
super().__init__()

src/adamatcher/ada_module/transformer.py

+1-4
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
FullAttention,
1111
LinearAttention,
1212
MultiHeadAttention,
13-
SimAttention,
1413
)
1514

1615

@@ -28,8 +27,6 @@ def __init__(self, d_model, nhead, attention="linear"):
2827
self.attention = (
2928
LinearAttention()
3029
if attention == "linear"
31-
else SimAttention()
32-
if attention == "sim"
3330
else FullAttention()
3431
)
3532
self.merge = nn.Linear(d_model, d_model, bias=False)
@@ -89,7 +86,7 @@ def __init__(self, config):
8986
self.d_model = config["d_model"]
9087
self.nhead = config["nhead"]
9188
self.layer_names = config["layer_names"]
92-
encoder_layer = LoFTREncoderLayer(
89+
encoder_layer = EncoderLayer(
9390
config["d_model"], config["nhead"], config["attention"]
9491
)
9592
self.layers = nn.ModuleList(

src/adamatcher/adamatcher.py

+2-7
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,8 @@ def __init__(self, config, training=True):
2828
self.pos_encoding = PositionEncodingSine(
2929
config["coarse"]["d_model"], max_shape=(512, 512)
3030
)
31-
self.backbone = build_backbone(config) # ResNetFPN_64_8_2
32-
self.feature_interaction = FICAS() # FeatureAttention()
31+
self.backbone = build_backbone(config)
32+
self.feature_interaction = FICAS()
3333

3434
self.coarse_module = CoarseModule(config["match_coarse"], config["resolution"])
3535
self.fine_module = FineModule(config["resolution"])
@@ -102,13 +102,8 @@ def forward(self, data):
102102
{"cas_score0": cas_score0, "cas_score1": cas_score1} # [N, h0_l1, w0_l1]
103103
) # [N, h1_l1, w1_l1]
104104

105-
# torch.cuda.synchronize()
106-
# self.ficas_time += time.time() - t1
107-
108105
# coarse match
109106
self.coarse_module(data, mask_feat0, mask_feat1)
110107

111108
# sub-pixel refinement
112-
# feat_d2_0 = self.pos_encoding_fine(feat_d2_0)
113-
# feat_d2_1 = self.pos_encoding_fine(feat_d2_1)
114109
self.fine_module(data, feat_d2_0, feat_d2_1, feat_c_0, feat_c_1)

src/adamatcher/backbone/feature_interaction.py

+8-28
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ def make_head_layer(cnv_dim, curr_dim, out_dim, head_name=None):
2121
# nn.BatchNorm2d(curr_dim, eps=1e-3, momentum=0.01),
2222
nn.ReLU(inplace=True),
2323
nn.Conv2d(curr_dim, out_dim, kernel_size=3, stride=1, padding=1),
24-
) # kernel=1, padding=0, bias=True
24+
)
2525

2626
for l in fc.modules():
2727
if isinstance(l, nn.Conv2d):
@@ -72,7 +72,6 @@ def forward(self, x0, x1, x0_mask=None, x1_mask=None, flag=False):
7272
if x0_mask != None and x1_mask != None:
7373
x0_mask, x1_mask = x0_mask.flatten(-2), x1_mask.flatten(-2)
7474

75-
save_feat = []
7675
if flag is False:
7776
for i, (layer, name) in enumerate(zip(self.layers, self.layer_names)):
7877
if name == "self":
@@ -85,9 +84,6 @@ def forward(self, x0, x1, x0_mask=None, x1_mask=None, flag=False):
8584
raise KeyError
8685
x0 = layer(x0, src0, x0_mask, src0_mask)
8786
x1 = layer(x1, src1, x1_mask, src1_mask)
88-
if i == 1: # i==len(self.layer_names)//2-1:
89-
# print(i, len(self.layer_names))
90-
save_feat.append((x0, x1))
9187
elif flag == 1: # origin
9288
for layer, name in zip(self.layers, self.layer_names):
9389
if name == "self":
@@ -109,11 +105,7 @@ def forward(self, x0, x1, x0_mask=None, x1_mask=None, flag=False):
109105
else:
110106
raise KeyError
111107

112-
# return feat0, feat1
113-
if len(save_feat) > 0:
114-
return x0, x1, save_feat
115-
else:
116-
return x0, x1
108+
return x0, x1
117109

118110

119111
class SegmentationModule(nn.Module):
@@ -129,22 +121,15 @@ def __init__(self, d_model, num_query):
129121
def forward(self, x, hs, mask=None):
130122
# x:[n, 256, h, w] hs:[n, num_q, 256]
131123

132-
# TODO: BN
133124
if mask is not None:
134-
# hs = self.encoderlayer(hs, x3_flatten, None, mask_flatten)
135125
attn_mask = torch.einsum("mqc,mchw->mqhw", hs, x)
136-
# attn_mask = self.bn(attn_mask)
137-
# attn_mask = attn_mask * self.gamma
138126
attn_mask = attn_mask.sigmoid() * mask.unsqueeze(1)
139127
classification = self.block(x * attn_mask + x).sigmoid().squeeze(1) * mask
140128
else:
141-
# hs = self.encoderlayer(hs, x3_flatten)
142129
attn_mask = torch.einsum("mqc,mchw->mqhw", hs, x)
143-
# attn_mask = self.bn(attn_mask)
144-
# attn_mask = attn_mask * self.gamma
145130
attn_mask = attn_mask.sigmoid()
146131
classification = self.block(x * attn_mask + x).sigmoid().squeeze(1)
147-
return classification # , attn_mask # , mask_feat
132+
return classification
148133

149134

150135
class FICAS(nn.Module):
@@ -166,7 +151,7 @@ def __init__(self, layer_num=4, d_model=256):
166151
self.layer_names1 = [
167152
"self",
168153
"cross",
169-
] # ['self', 'cross', 'cross'] # ['self', 'cross'] origin for eccv
154+
]
170155
self.layers1 = nn.ModuleList(
171156
[copy.deepcopy(encoder_layer) for _ in range(len(self.layer_names1))]
172157
)
@@ -186,7 +171,7 @@ def __init__(self, layer_num=4, d_model=256):
186171
self.layer_names3 = [
187172
"self",
188173
"cross",
189-
] # ['self', 'cross', 'cross'] # ['self', 'cross'] origin for eccv
174+
]
190175
self.layers3 = nn.ModuleList(
191176
[copy.deepcopy(encoder_layer) for _ in range(len(self.layer_names3))]
192177
)
@@ -216,8 +201,7 @@ def transformer(self, x0, x1, x0_mask, x1_mask, layer_name, layer):
216201
and src1_mask is not None
217202
and not self.training
218203
and 0
219-
): # \
220-
# and layer_name == 'self' and 0:
204+
):
221205
temp_x = layer(
222206
torch.cat([x0, x1], dim=0),
223207
torch.cat([src0, src1], dim=0),
@@ -252,8 +236,7 @@ def feature_interaction(self, x0, x1, x0_mask=None, x1_mask=None):
252236
feature_embed1 = self.feature_embed.weight.unsqueeze(0).repeat(bs, 1, 1)
253237
tgt0 = torch.zeros_like(feature_embed0)
254238
tgt1 = torch.zeros_like(feature_embed1)
255-
# hs0 = self.decoder(tgt0, x0, tgt_mask=None, memory_mask=x0_mask)
256-
# hs1 = self.decoder(tgt1, x1, tgt_mask=None, memory_mask=x1_mask)
239+
257240
if (
258241
0
259242
): # x0.shape==x1.shape and x0_mask is not None and x0_mask.shape==x1_mask.shape:
@@ -331,10 +314,7 @@ def forward(self, x0, x1, x0_mask=None, x1_mask=None, use_cas=True):
331314
out0, out1, hs0, hs1, x0_mid, x1_mid = self.feature_interaction(
332315
x0, x1, x0_mask, x1_mask
333316
)
334-
# out0 = rearrange(out0, 'n (h w) c -> n c h w',
335-
# h=h0, w=w0).contiguous()
336-
# out1 = rearrange(out1, 'n (h w) c -> n c h w',
337-
# h=h1, w=w1).contiguous()
317+
338318
if use_cas:
339319
x0_mid = rearrange(x0_mid, "n (h w) c -> n c h w", h=h0, w=w0).contiguous()
340320
x1_mid = rearrange(x1_mid, "n (h w) c -> n c h w", h=h1, w=w1).contiguous()

src/adamatcher/localization/__init__.py

Whitespace-only changes.

0 commit comments

Comments
 (0)