@@ -1416,7 +1416,7 @@ def __init__(self, graph_dict, imageless_dataloaders, image_dir,
1416
1416
1417
1417
# set this graph to use for generating corrupt pairs on the fly
1418
1418
# so this graph should correspond to the graph: fully connected graph - tc graph
1419
- # for the train set
1419
+ # for the train set
1420
1420
self .criterion .set_negative_graph (self .graph_dict ['G_train_neg' ], self .graph_dict ['mapping_node_to_ix' ],
1421
1421
self .graph_dict ['mapping_ix_to_node' ])
1422
1422
#
@@ -1514,9 +1514,14 @@ def run_model(self, optimizer):
1514
1514
self .optimizer_labels = optim .SGD ([{'params' : self .model .parameters (), 'lr' : 0.0 }], momentum = 0.0 )
1515
1515
self .optimizer_images = optim .Adam ([{'params' : self .img_feat_net .parameters ()}], lr = self .lr_images )
1516
1516
else :
1517
+ params_to_update = [{'params' : self .model .parameters ()},
1518
+ {'params' : self .img_feat_net .parameters ()}]
1519
+
1517
1520
self .optimizer_labels = optim .Adam ([{'params' : self .model .parameters ()}], lr = self .lr_labels )
1518
1521
self .optimizer_images = optim .Adam ([{'params' : self .img_feat_net .parameters ()}], lr = self .lr_images )
1519
1522
1523
+ self .optimizer_labels = optim .Adam ([{'params' : list (self .model .parameters ()) + list (self .img_feat_net .parameters ())}], lr = self .lr_labels )
1524
+
1520
1525
self .scheduler_labels = torch .optim .lr_scheduler .MultiStepLR (self .optimizer_labels , milestones = self .lr_step , gamma = 0.1 )
1521
1526
self .scheduler_images = torch .optim .lr_scheduler .MultiStepLR (self .optimizer_images , milestones = self .lr_step , gamma = 0.1 )
1522
1527
@@ -1732,7 +1737,7 @@ def pass_samples(self, phase, save_to_tensorboard=True):
1732
1737
1733
1738
# zero the parameter gradients
1734
1739
self .optimizer_labels .zero_grad ()
1735
- self .optimizer_images .zero_grad ()
1740
+ # self.optimizer_images.zero_grad()
1736
1741
1737
1742
# forward
1738
1743
# track history if only in train
@@ -1762,7 +1767,7 @@ def pass_samples(self, phase, save_to_tensorboard=True):
1762
1767
# convert euclidean gradients to riemannian gradients for the label embeddings
1763
1768
self .model .module .embeddings .weight .grad .data *= (1.0 / self .lambda_x (self .model .module .embeddings .weight .data ))** 2
1764
1769
self .optimizer_labels .step ()
1765
- self .optimizer_images .step ()
1770
+ # self.optimizer_images.step()
1766
1771
self .model .module .embeddings .weight .data = self .soft_clip (self .model .module .embeddings .weight .data )
1767
1772
1768
1773
# statistics
0 commit comments