@@ -456,20 +456,23 @@ def check_frequency(self, check_idx):
     @rank_zero_only
     def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
         if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
-            self.log_img(pl_module, batch, batch_idx, split="train")
+            # self.log_img(pl_module, batch, batch_idx, split="train")
+            pass
 
     @rank_zero_only
     def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
         if self.log_before_first_step and pl_module.global_step == 0:
             print(f"{self.__class__.__name__}: logging before training")
-            self.log_img(pl_module, batch, batch_idx, split="train")
+            # self.log_img(pl_module, batch, batch_idx, split="train")
+            pass
 
     @rank_zero_only
     def on_validation_batch_end(
         self, trainer, pl_module, outputs, batch, batch_idx, *args, **kwargs
     ):
         if not self.disabled and pl_module.global_step > 0:
-            self.log_img(pl_module, batch, batch_idx, split="val")
+            # self.log_img(pl_module, batch, batch_idx, split="val")
+            pass
         if hasattr(pl_module, "calibrate_grad_norm"):
             if (
                 pl_module.calibrate_grad_norm and batch_idx % 25 == 0
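For reference, below is a minimal sketch of the Lightning hook pattern this hunk disables, assuming pytorch_lightning 2.x hook signatures; MinimalImageLogger and its log_img body are illustrative stand-ins, not the repository's ImageLogger (which adds frequency checks and actual image rendering). Note that constructing the callback with disabled=True uses the flag already checked above and suppresses logging without commenting out the calls.

# Minimal sketch only -- hypothetical class, assumes pytorch_lightning >= 2.0.
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only


class MinimalImageLogger(pl.Callback):
    def __init__(self, disabled: bool = False, log_first_step: bool = False):
        super().__init__()
        self.disabled = disabled
        self.log_first_step = log_first_step

    def log_img(self, pl_module, batch, batch_idx, split="train"):
        # placeholder: the real callback renders and saves images here
        print(f"[{split}] would log images at global_step={pl_module.global_step}")

    @rank_zero_only
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
            self.log_img(pl_module, batch, batch_idx, split="train")

    @rank_zero_only
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, *args, **kwargs):
        if not self.disabled and pl_module.global_step > 0:
            self.log_img(pl_module, batch, batch_idx, split="val")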
@@ -831,8 +834,6 @@ def init_wandb(save_dir, opt, config, group_name, name_str):
         # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
         # calling these ourselves should not be necessary but it is.
         # lightning still takes care of proper multiprocessing though
-        print("DATAAAAAAA", data)
-        print("-" * 100)
         data.prepare_data()
         # data.setup()
         print("#### Data #####")
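For reference, below is a minimal sketch of the manual DataModule preparation the NOTE above describes, assuming pytorch_lightning and torch are installed; ExampleDataModule is a hypothetical stand-in for the project's data config. As in the script, prepare_data() is called explicitly up front, while setup() is left for the Trainer to run per process.

# Minimal sketch only -- hypothetical DataModule, not the repository's data class.
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, TensorDataset


class ExampleDataModule(pl.LightningDataModule):
    def prepare_data(self):
        # single-process work: download / untar / write caches to disk
        pass

    def setup(self, stage=None):
        # per-process work: build the datasets used by the dataloaders
        self.train_set = TensorDataset(torch.randn(8, 3))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=4)


data = ExampleDataModule()
data.prepare_data()   # explicit call, mirroring the script
# data.setup()        # left to the Trainer, as in the diff
print("#### Data #####")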