diff --git a/docs/api_docs/python/tfr/_api_cache.json b/docs/api_docs/python/tfr/_api_cache.json
index 8ed2641..3e48146 100644
--- a/docs/api_docs/python/tfr/_api_cache.json
+++ b/docs/api_docs/python/tfr/_api_cache.json
@@ -300,7 +300,6 @@
"tfr.keras.layers.ConcatFeatures.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.ConcatFeatures.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.ConcatFeatures.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.ConcatFeatures.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.ConcatFeatures.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.ConcatFeatures.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
"tfr.keras.layers.ConcatFeatures.compute_mask": "tfr.keras.layers.Bilinear.compute_mask",
@@ -312,12 +311,14 @@
"tfr.keras.layers.ConcatFeatures.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.ConcatFeatures.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.ConcatFeatures.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.ConcatFeatures.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.ConcatFeatures.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.ConcatFeatures.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.ConcatFeatures.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.ConcatFeatures.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.ConcatFeatures.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.ConcatFeatures.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.ConcatFeatures.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.ConcatFeatures.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.ConcatFeatures.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.ConcatFeatures.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -335,7 +336,6 @@
"tfr.keras.layers.DocumentInteractionAttention.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.DocumentInteractionAttention.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.DocumentInteractionAttention.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.DocumentInteractionAttention.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.DocumentInteractionAttention.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.DocumentInteractionAttention.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
"tfr.keras.layers.DocumentInteractionAttention.compute_mask": "tfr.keras.layers.Bilinear.compute_mask",
@@ -348,12 +348,14 @@
"tfr.keras.layers.DocumentInteractionAttention.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.DocumentInteractionAttention.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.DocumentInteractionAttention.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.DocumentInteractionAttention.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.DocumentInteractionAttention.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.DocumentInteractionAttention.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.DocumentInteractionAttention.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.DocumentInteractionAttention.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.DocumentInteractionAttention.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.DocumentInteractionAttention.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.DocumentInteractionAttention.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.DocumentInteractionAttention.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.DocumentInteractionAttention.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.DocumentInteractionAttention.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -371,7 +373,6 @@
"tfr.keras.layers.FlattenList.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.FlattenList.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.FlattenList.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.FlattenList.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.FlattenList.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.layers.FlattenList.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.FlattenList.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -385,12 +386,14 @@
"tfr.keras.layers.FlattenList.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.FlattenList.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.FlattenList.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.FlattenList.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.FlattenList.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.FlattenList.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.FlattenList.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.FlattenList.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.FlattenList.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.FlattenList.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.FlattenList.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.FlattenList.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.FlattenList.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.FlattenList.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -408,7 +411,6 @@
"tfr.keras.layers.GAMLayer.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.GAMLayer.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.GAMLayer.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.GAMLayer.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.GAMLayer.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.layers.GAMLayer.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.GAMLayer.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -422,12 +424,14 @@
"tfr.keras.layers.GAMLayer.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.GAMLayer.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.GAMLayer.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.GAMLayer.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.GAMLayer.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.GAMLayer.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.GAMLayer.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.GAMLayer.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.GAMLayer.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.GAMLayer.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.GAMLayer.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.GAMLayer.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.GAMLayer.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.GAMLayer.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -445,7 +449,6 @@
"tfr.keras.layers.RestoreList.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.RestoreList.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.RestoreList.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.RestoreList.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.RestoreList.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.layers.RestoreList.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.RestoreList.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -459,12 +462,14 @@
"tfr.keras.layers.RestoreList.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.RestoreList.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.RestoreList.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.RestoreList.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.RestoreList.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.RestoreList.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.RestoreList.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.RestoreList.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.RestoreList.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.RestoreList.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.RestoreList.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.RestoreList.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.RestoreList.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.RestoreList.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -482,7 +487,6 @@
"tfr.keras.layers.SelfAttentionMask.__new__": "tfr.keras.layers.Bilinear.__new__",
"tfr.keras.layers.SelfAttentionMask.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.layers.SelfAttentionMask.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.layers.SelfAttentionMask.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.layers.SelfAttentionMask.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.layers.SelfAttentionMask.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.layers.SelfAttentionMask.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -496,12 +500,14 @@
"tfr.keras.layers.SelfAttentionMask.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.layers.SelfAttentionMask.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.layers.SelfAttentionMask.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.layers.SelfAttentionMask.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.layers.SelfAttentionMask.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.layers.SelfAttentionMask.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.layers.SelfAttentionMask.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.layers.SelfAttentionMask.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.layers.SelfAttentionMask.non_trainable_weights": "tfr.keras.layers.Bilinear.non_trainable_weights",
"tfr.keras.layers.SelfAttentionMask.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.layers.SelfAttentionMask.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.layers.SelfAttentionMask.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.layers.SelfAttentionMask.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.layers.SelfAttentionMask.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -697,6 +703,21 @@
"tfr.keras.losses.UniqueSoftmaxLoss.__ne__": "tfr.keras.layers.Bilinear.__ne__",
"tfr.keras.losses.UniqueSoftmaxLoss.__new__": "tfr.keras.losses.ApproxMRRLoss.__new__",
"tfr.keras.losses.UniqueSoftmaxLoss.get_config": "tfr.keras.losses.ApproxMRRLoss.get_config",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__eq__": "tfr.keras.layers.Bilinear.__eq__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__ge__": "tfr.keras.layers.Bilinear.__ge__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__gt__": "tfr.keras.layers.Bilinear.__gt__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__le__": "tfr.keras.layers.Bilinear.__le__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__lt__": "tfr.keras.layers.Bilinear.__lt__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__ne__": "tfr.keras.layers.Bilinear.__ne__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.__new__": "tfr.keras.losses.ApproxMRRLoss.__new__",
+ "tfr.keras.losses.YetiDCGLambdaWeight.individual_weights": "tfr.keras.losses.DCGLambdaWeight.individual_weights",
+ "tfr.keras.losses.YetiLogisticLoss.__eq__": "tfr.keras.layers.Bilinear.__eq__",
+ "tfr.keras.losses.YetiLogisticLoss.__ge__": "tfr.keras.layers.Bilinear.__ge__",
+ "tfr.keras.losses.YetiLogisticLoss.__gt__": "tfr.keras.layers.Bilinear.__gt__",
+ "tfr.keras.losses.YetiLogisticLoss.__le__": "tfr.keras.layers.Bilinear.__le__",
+ "tfr.keras.losses.YetiLogisticLoss.__lt__": "tfr.keras.layers.Bilinear.__lt__",
+ "tfr.keras.losses.YetiLogisticLoss.__ne__": "tfr.keras.layers.Bilinear.__ne__",
+ "tfr.keras.losses.YetiLogisticLoss.__new__": "tfr.keras.losses.ApproxMRRLoss.__new__",
"tfr.keras.metrics.ARPMetric.__eq__": "tfr.keras.layers.Bilinear.__eq__",
"tfr.keras.metrics.ARPMetric.__ge__": "tfr.keras.layers.Bilinear.__ge__",
"tfr.keras.metrics.ARPMetric.__gt__": "tfr.keras.layers.Bilinear.__gt__",
@@ -705,7 +726,6 @@
"tfr.keras.metrics.ARPMetric.__ne__": "tfr.keras.layers.Bilinear.__ne__",
"tfr.keras.metrics.ARPMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.ARPMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.ARPMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.ARPMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.ARPMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.ARPMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -718,11 +738,13 @@
"tfr.keras.metrics.ARPMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.ARPMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.ARPMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.ARPMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.ARPMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.ARPMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
"tfr.keras.metrics.ARPMetric.name": "tfr.keras.layers.Bilinear.name",
"tfr.keras.metrics.ARPMetric.name_scope": "tfr.keras.layers.Bilinear.name_scope",
"tfr.keras.metrics.ARPMetric.output": "tfr.keras.layers.Bilinear.output",
+ "tfr.keras.metrics.ARPMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.ARPMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.ARPMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.ARPMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -739,7 +761,6 @@
"tfr.keras.metrics.AlphaDCGMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.AlphaDCGMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.AlphaDCGMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.AlphaDCGMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.AlphaDCGMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.AlphaDCGMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.AlphaDCGMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -753,6 +774,7 @@
"tfr.keras.metrics.AlphaDCGMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.AlphaDCGMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.AlphaDCGMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.AlphaDCGMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.AlphaDCGMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.AlphaDCGMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.AlphaDCGMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -762,6 +784,7 @@
"tfr.keras.metrics.AlphaDCGMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.AlphaDCGMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.AlphaDCGMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.AlphaDCGMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.AlphaDCGMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.AlphaDCGMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.AlphaDCGMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -780,7 +803,6 @@
"tfr.keras.metrics.DCGMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.DCGMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.DCGMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.DCGMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.DCGMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.DCGMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.DCGMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -794,6 +816,7 @@
"tfr.keras.metrics.DCGMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.DCGMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.DCGMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.DCGMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.DCGMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.DCGMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.DCGMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -803,6 +826,7 @@
"tfr.keras.metrics.DCGMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.DCGMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.DCGMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.DCGMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.DCGMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.DCGMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.DCGMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -821,7 +845,6 @@
"tfr.keras.metrics.HitsMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.HitsMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.HitsMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.HitsMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.HitsMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.HitsMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.HitsMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -835,6 +858,7 @@
"tfr.keras.metrics.HitsMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.HitsMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.HitsMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.HitsMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.HitsMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.HitsMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.HitsMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -844,6 +868,7 @@
"tfr.keras.metrics.HitsMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.HitsMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.HitsMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.HitsMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.HitsMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.HitsMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.HitsMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -862,7 +887,6 @@
"tfr.keras.metrics.MRRMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.MRRMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.MRRMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.MRRMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.MRRMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.MRRMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.MRRMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -876,6 +900,7 @@
"tfr.keras.metrics.MRRMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.MRRMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.MRRMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.MRRMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.MRRMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.MRRMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.MRRMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -885,6 +910,7 @@
"tfr.keras.metrics.MRRMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.MRRMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.MRRMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.MRRMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.MRRMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.MRRMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.MRRMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -903,7 +929,6 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.MeanAveragePrecisionMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.MeanAveragePrecisionMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.MeanAveragePrecisionMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.MeanAveragePrecisionMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.MeanAveragePrecisionMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.MeanAveragePrecisionMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -917,6 +942,7 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.MeanAveragePrecisionMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.MeanAveragePrecisionMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.MeanAveragePrecisionMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.MeanAveragePrecisionMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.MeanAveragePrecisionMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.MeanAveragePrecisionMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -926,6 +952,7 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.MeanAveragePrecisionMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.MeanAveragePrecisionMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.MeanAveragePrecisionMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.MeanAveragePrecisionMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.MeanAveragePrecisionMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.MeanAveragePrecisionMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -944,7 +971,6 @@
"tfr.keras.metrics.NDCGMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.NDCGMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.NDCGMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.NDCGMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.NDCGMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.NDCGMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.NDCGMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -958,6 +984,7 @@
"tfr.keras.metrics.NDCGMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.NDCGMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.NDCGMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.NDCGMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.NDCGMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.NDCGMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.NDCGMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -967,6 +994,7 @@
"tfr.keras.metrics.NDCGMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.NDCGMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.NDCGMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.NDCGMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.NDCGMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.NDCGMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.NDCGMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -985,7 +1013,6 @@
"tfr.keras.metrics.OPAMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.OPAMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.OPAMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.OPAMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.OPAMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.OPAMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.OPAMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -1000,6 +1027,7 @@
"tfr.keras.metrics.OPAMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.OPAMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.OPAMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.OPAMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.OPAMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.OPAMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.OPAMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -1009,6 +1037,7 @@
"tfr.keras.metrics.OPAMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.OPAMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.OPAMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.OPAMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.OPAMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.OPAMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.OPAMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -1027,7 +1056,6 @@
"tfr.keras.metrics.PrecisionIAMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.PrecisionIAMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.PrecisionIAMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.PrecisionIAMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.PrecisionIAMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.PrecisionIAMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.PrecisionIAMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -1041,6 +1069,7 @@
"tfr.keras.metrics.PrecisionIAMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.PrecisionIAMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.PrecisionIAMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.PrecisionIAMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.PrecisionIAMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.PrecisionIAMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.PrecisionIAMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -1050,6 +1079,7 @@
"tfr.keras.metrics.PrecisionIAMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.PrecisionIAMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.PrecisionIAMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.PrecisionIAMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.PrecisionIAMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.PrecisionIAMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.PrecisionIAMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -1068,7 +1098,6 @@
"tfr.keras.metrics.PrecisionMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.PrecisionMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.PrecisionMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.PrecisionMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.PrecisionMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.PrecisionMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.PrecisionMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -1082,6 +1111,7 @@
"tfr.keras.metrics.PrecisionMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.PrecisionMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.PrecisionMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.PrecisionMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.PrecisionMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.PrecisionMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.PrecisionMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -1091,6 +1121,7 @@
"tfr.keras.metrics.PrecisionMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.PrecisionMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.PrecisionMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.PrecisionMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.PrecisionMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.PrecisionMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.PrecisionMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -1117,7 +1148,6 @@
"tfr.keras.metrics.RecallMetric.__new__": "tfr.keras.metrics.ARPMetric.__new__",
"tfr.keras.metrics.RecallMetric.activity_regularizer": "tfr.keras.layers.Bilinear.activity_regularizer",
"tfr.keras.metrics.RecallMetric.add_loss": "tfr.keras.layers.Bilinear.add_loss",
- "tfr.keras.metrics.RecallMetric.add_metric": "tfr.keras.layers.Bilinear.add_metric",
"tfr.keras.metrics.RecallMetric.build": "tfr.keras.layers.ConcatFeatures.build",
"tfr.keras.metrics.RecallMetric.build_from_config": "tfr.keras.layers.Bilinear.build_from_config",
"tfr.keras.metrics.RecallMetric.compute_dtype": "tfr.keras.layers.Bilinear.compute_dtype",
@@ -1131,6 +1161,7 @@
"tfr.keras.metrics.RecallMetric.get_weights": "tfr.keras.layers.Bilinear.get_weights",
"tfr.keras.metrics.RecallMetric.input": "tfr.keras.layers.Bilinear.input",
"tfr.keras.metrics.RecallMetric.input_spec": "tfr.keras.layers.Bilinear.input_spec",
+ "tfr.keras.metrics.RecallMetric.load_own_variables": "tfr.keras.layers.Bilinear.load_own_variables",
"tfr.keras.metrics.RecallMetric.losses": "tfr.keras.layers.Bilinear.losses",
"tfr.keras.metrics.RecallMetric.merge_state": "tfr.keras.metrics.ARPMetric.merge_state",
"tfr.keras.metrics.RecallMetric.metrics": "tfr.keras.layers.Bilinear.metrics",
@@ -1140,6 +1171,7 @@
"tfr.keras.metrics.RecallMetric.output": "tfr.keras.layers.Bilinear.output",
"tfr.keras.metrics.RecallMetric.reset_state": "tfr.keras.metrics.ARPMetric.reset_state",
"tfr.keras.metrics.RecallMetric.result": "tfr.keras.metrics.ARPMetric.result",
+ "tfr.keras.metrics.RecallMetric.save_own_variables": "tfr.keras.layers.Bilinear.save_own_variables",
"tfr.keras.metrics.RecallMetric.set_weights": "tfr.keras.layers.Bilinear.set_weights",
"tfr.keras.metrics.RecallMetric.submodules": "tfr.keras.layers.Bilinear.submodules",
"tfr.keras.metrics.RecallMetric.supports_masking": "tfr.keras.layers.Bilinear.supports_masking",
@@ -1418,6 +1450,7 @@
"tfr.extension.premade.TFRBertConfig.__ne__": true,
"tfr.extension.premade.TFRBertConfig.__new__": true,
"tfr.extension.premade.TFRBertConfig.aggregated_metrics": true,
+ "tfr.extension.premade.TFRBertConfig.allow_image_summary": true,
"tfr.extension.premade.TFRBertConfig.as_dict": true,
"tfr.extension.premade.TFRBertConfig.default_params": true,
"tfr.extension.premade.TFRBertConfig.differential_privacy_config": true,
@@ -1454,6 +1487,7 @@
"tfr.extension.premade.TFRBertDataConfig.__new__": true,
"tfr.extension.premade.TFRBertDataConfig.apply_tf_data_service_before_batching": true,
"tfr.extension.premade.TFRBertDataConfig.as_dict": true,
+ "tfr.extension.premade.TFRBertDataConfig.autotune_algorithm": true,
"tfr.extension.premade.TFRBertDataConfig.block_length": true,
"tfr.extension.premade.TFRBertDataConfig.cache": true,
"tfr.extension.premade.TFRBertDataConfig.convert_labels_to_binary": true,
@@ -1607,6 +1641,7 @@
"tfr.extension.premade.tfrbert_task.TFRBertConfig.__ne__": true,
"tfr.extension.premade.tfrbert_task.TFRBertConfig.__new__": true,
"tfr.extension.premade.tfrbert_task.TFRBertConfig.aggregated_metrics": true,
+ "tfr.extension.premade.tfrbert_task.TFRBertConfig.allow_image_summary": true,
"tfr.extension.premade.tfrbert_task.TFRBertConfig.as_dict": true,
"tfr.extension.premade.tfrbert_task.TFRBertConfig.default_params": true,
"tfr.extension.premade.tfrbert_task.TFRBertConfig.differential_privacy_config": true,
@@ -1643,6 +1678,7 @@
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.__new__": true,
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.apply_tf_data_service_before_batching": true,
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.as_dict": true,
+ "tfr.extension.premade.tfrbert_task.TFRBertDataConfig.autotune_algorithm": true,
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.block_length": true,
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.cache": true,
"tfr.extension.premade.tfrbert_task.TFRBertDataConfig.convert_labels_to_binary": true,
@@ -1798,6 +1834,7 @@
"tfr.extension.task.RankingDataConfig.__new__": true,
"tfr.extension.task.RankingDataConfig.apply_tf_data_service_before_batching": true,
"tfr.extension.task.RankingDataConfig.as_dict": true,
+ "tfr.extension.task.RankingDataConfig.autotune_algorithm": true,
"tfr.extension.task.RankingDataConfig.block_length": true,
"tfr.extension.task.RankingDataConfig.cache": true,
"tfr.extension.task.RankingDataConfig.convert_labels_to_binary": true,
@@ -1893,6 +1930,7 @@
"tfr.extension.task.RankingTaskConfig.__ne__": true,
"tfr.extension.task.RankingTaskConfig.__new__": true,
"tfr.extension.task.RankingTaskConfig.aggregated_metrics": true,
+ "tfr.extension.task.RankingTaskConfig.allow_image_summary": true,
"tfr.extension.task.RankingTaskConfig.as_dict": true,
"tfr.extension.task.RankingTaskConfig.default_params": true,
"tfr.extension.task.RankingTaskConfig.differential_privacy_config": true,
@@ -1927,7 +1965,6 @@
"tfr.keras.layers.Bilinear.__new__": true,
"tfr.keras.layers.Bilinear.activity_regularizer": true,
"tfr.keras.layers.Bilinear.add_loss": true,
- "tfr.keras.layers.Bilinear.add_metric": true,
"tfr.keras.layers.Bilinear.build": true,
"tfr.keras.layers.Bilinear.build_from_config": true,
"tfr.keras.layers.Bilinear.compute_dtype": true,
@@ -1943,12 +1980,14 @@
"tfr.keras.layers.Bilinear.get_weights": true,
"tfr.keras.layers.Bilinear.input": true,
"tfr.keras.layers.Bilinear.input_spec": true,
+ "tfr.keras.layers.Bilinear.load_own_variables": true,
"tfr.keras.layers.Bilinear.losses": true,
"tfr.keras.layers.Bilinear.metrics": true,
"tfr.keras.layers.Bilinear.name": true,
"tfr.keras.layers.Bilinear.name_scope": true,
"tfr.keras.layers.Bilinear.non_trainable_weights": true,
"tfr.keras.layers.Bilinear.output": true,
+ "tfr.keras.layers.Bilinear.save_own_variables": true,
"tfr.keras.layers.Bilinear.set_weights": true,
"tfr.keras.layers.Bilinear.submodules": true,
"tfr.keras.layers.Bilinear.supports_masking": true,
@@ -1969,7 +2008,6 @@
"tfr.keras.layers.ConcatFeatures.__new__": true,
"tfr.keras.layers.ConcatFeatures.activity_regularizer": true,
"tfr.keras.layers.ConcatFeatures.add_loss": true,
- "tfr.keras.layers.ConcatFeatures.add_metric": true,
"tfr.keras.layers.ConcatFeatures.build": true,
"tfr.keras.layers.ConcatFeatures.build_from_config": true,
"tfr.keras.layers.ConcatFeatures.compute_dtype": true,
@@ -1985,12 +2023,14 @@
"tfr.keras.layers.ConcatFeatures.get_weights": true,
"tfr.keras.layers.ConcatFeatures.input": true,
"tfr.keras.layers.ConcatFeatures.input_spec": true,
+ "tfr.keras.layers.ConcatFeatures.load_own_variables": true,
"tfr.keras.layers.ConcatFeatures.losses": true,
"tfr.keras.layers.ConcatFeatures.metrics": true,
"tfr.keras.layers.ConcatFeatures.name": true,
"tfr.keras.layers.ConcatFeatures.name_scope": true,
"tfr.keras.layers.ConcatFeatures.non_trainable_weights": true,
"tfr.keras.layers.ConcatFeatures.output": true,
+ "tfr.keras.layers.ConcatFeatures.save_own_variables": true,
"tfr.keras.layers.ConcatFeatures.set_weights": true,
"tfr.keras.layers.ConcatFeatures.submodules": true,
"tfr.keras.layers.ConcatFeatures.supports_masking": true,
@@ -2011,7 +2051,6 @@
"tfr.keras.layers.DocumentInteractionAttention.__new__": true,
"tfr.keras.layers.DocumentInteractionAttention.activity_regularizer": true,
"tfr.keras.layers.DocumentInteractionAttention.add_loss": true,
- "tfr.keras.layers.DocumentInteractionAttention.add_metric": true,
"tfr.keras.layers.DocumentInteractionAttention.build": true,
"tfr.keras.layers.DocumentInteractionAttention.build_from_config": true,
"tfr.keras.layers.DocumentInteractionAttention.compute_dtype": true,
@@ -2027,12 +2066,14 @@
"tfr.keras.layers.DocumentInteractionAttention.get_weights": true,
"tfr.keras.layers.DocumentInteractionAttention.input": true,
"tfr.keras.layers.DocumentInteractionAttention.input_spec": true,
+ "tfr.keras.layers.DocumentInteractionAttention.load_own_variables": true,
"tfr.keras.layers.DocumentInteractionAttention.losses": true,
"tfr.keras.layers.DocumentInteractionAttention.metrics": true,
"tfr.keras.layers.DocumentInteractionAttention.name": true,
"tfr.keras.layers.DocumentInteractionAttention.name_scope": true,
"tfr.keras.layers.DocumentInteractionAttention.non_trainable_weights": true,
"tfr.keras.layers.DocumentInteractionAttention.output": true,
+ "tfr.keras.layers.DocumentInteractionAttention.save_own_variables": true,
"tfr.keras.layers.DocumentInteractionAttention.set_weights": true,
"tfr.keras.layers.DocumentInteractionAttention.submodules": true,
"tfr.keras.layers.DocumentInteractionAttention.supports_masking": true,
@@ -2053,7 +2094,6 @@
"tfr.keras.layers.FlattenList.__new__": true,
"tfr.keras.layers.FlattenList.activity_regularizer": true,
"tfr.keras.layers.FlattenList.add_loss": true,
- "tfr.keras.layers.FlattenList.add_metric": true,
"tfr.keras.layers.FlattenList.build": true,
"tfr.keras.layers.FlattenList.build_from_config": true,
"tfr.keras.layers.FlattenList.compute_dtype": true,
@@ -2069,12 +2109,14 @@
"tfr.keras.layers.FlattenList.get_weights": true,
"tfr.keras.layers.FlattenList.input": true,
"tfr.keras.layers.FlattenList.input_spec": true,
+ "tfr.keras.layers.FlattenList.load_own_variables": true,
"tfr.keras.layers.FlattenList.losses": true,
"tfr.keras.layers.FlattenList.metrics": true,
"tfr.keras.layers.FlattenList.name": true,
"tfr.keras.layers.FlattenList.name_scope": true,
"tfr.keras.layers.FlattenList.non_trainable_weights": true,
"tfr.keras.layers.FlattenList.output": true,
+ "tfr.keras.layers.FlattenList.save_own_variables": true,
"tfr.keras.layers.FlattenList.set_weights": true,
"tfr.keras.layers.FlattenList.submodules": true,
"tfr.keras.layers.FlattenList.supports_masking": true,
@@ -2095,7 +2137,6 @@
"tfr.keras.layers.GAMLayer.__new__": true,
"tfr.keras.layers.GAMLayer.activity_regularizer": true,
"tfr.keras.layers.GAMLayer.add_loss": true,
- "tfr.keras.layers.GAMLayer.add_metric": true,
"tfr.keras.layers.GAMLayer.build": true,
"tfr.keras.layers.GAMLayer.build_from_config": true,
"tfr.keras.layers.GAMLayer.compute_dtype": true,
@@ -2111,12 +2152,14 @@
"tfr.keras.layers.GAMLayer.get_weights": true,
"tfr.keras.layers.GAMLayer.input": true,
"tfr.keras.layers.GAMLayer.input_spec": true,
+ "tfr.keras.layers.GAMLayer.load_own_variables": true,
"tfr.keras.layers.GAMLayer.losses": true,
"tfr.keras.layers.GAMLayer.metrics": true,
"tfr.keras.layers.GAMLayer.name": true,
"tfr.keras.layers.GAMLayer.name_scope": true,
"tfr.keras.layers.GAMLayer.non_trainable_weights": true,
"tfr.keras.layers.GAMLayer.output": true,
+ "tfr.keras.layers.GAMLayer.save_own_variables": true,
"tfr.keras.layers.GAMLayer.set_weights": true,
"tfr.keras.layers.GAMLayer.submodules": true,
"tfr.keras.layers.GAMLayer.supports_masking": true,
@@ -2137,7 +2180,6 @@
"tfr.keras.layers.RestoreList.__new__": true,
"tfr.keras.layers.RestoreList.activity_regularizer": true,
"tfr.keras.layers.RestoreList.add_loss": true,
- "tfr.keras.layers.RestoreList.add_metric": true,
"tfr.keras.layers.RestoreList.build": true,
"tfr.keras.layers.RestoreList.build_from_config": true,
"tfr.keras.layers.RestoreList.compute_dtype": true,
@@ -2153,12 +2195,14 @@
"tfr.keras.layers.RestoreList.get_weights": true,
"tfr.keras.layers.RestoreList.input": true,
"tfr.keras.layers.RestoreList.input_spec": true,
+ "tfr.keras.layers.RestoreList.load_own_variables": true,
"tfr.keras.layers.RestoreList.losses": true,
"tfr.keras.layers.RestoreList.metrics": true,
"tfr.keras.layers.RestoreList.name": true,
"tfr.keras.layers.RestoreList.name_scope": true,
"tfr.keras.layers.RestoreList.non_trainable_weights": true,
"tfr.keras.layers.RestoreList.output": true,
+ "tfr.keras.layers.RestoreList.save_own_variables": true,
"tfr.keras.layers.RestoreList.set_weights": true,
"tfr.keras.layers.RestoreList.submodules": true,
"tfr.keras.layers.RestoreList.supports_masking": true,
@@ -2179,7 +2223,6 @@
"tfr.keras.layers.SelfAttentionMask.__new__": true,
"tfr.keras.layers.SelfAttentionMask.activity_regularizer": true,
"tfr.keras.layers.SelfAttentionMask.add_loss": true,
- "tfr.keras.layers.SelfAttentionMask.add_metric": true,
"tfr.keras.layers.SelfAttentionMask.build": true,
"tfr.keras.layers.SelfAttentionMask.build_from_config": true,
"tfr.keras.layers.SelfAttentionMask.compute_dtype": true,
@@ -2195,12 +2238,14 @@
"tfr.keras.layers.SelfAttentionMask.get_weights": true,
"tfr.keras.layers.SelfAttentionMask.input": true,
"tfr.keras.layers.SelfAttentionMask.input_spec": true,
+ "tfr.keras.layers.SelfAttentionMask.load_own_variables": true,
"tfr.keras.layers.SelfAttentionMask.losses": true,
"tfr.keras.layers.SelfAttentionMask.metrics": true,
"tfr.keras.layers.SelfAttentionMask.name": true,
"tfr.keras.layers.SelfAttentionMask.name_scope": true,
"tfr.keras.layers.SelfAttentionMask.non_trainable_weights": true,
"tfr.keras.layers.SelfAttentionMask.output": true,
+ "tfr.keras.layers.SelfAttentionMask.save_own_variables": true,
"tfr.keras.layers.SelfAttentionMask.set_weights": true,
"tfr.keras.layers.SelfAttentionMask.submodules": true,
"tfr.keras.layers.SelfAttentionMask.supports_masking": true,
@@ -2454,6 +2499,7 @@
"tfr.keras.losses.RankingLossKey.SIGMOID_CROSS_ENTROPY_LOSS": true,
"tfr.keras.losses.RankingLossKey.SOFTMAX_LOSS": true,
"tfr.keras.losses.RankingLossKey.UNIQUE_SOFTMAX_LOSS": true,
+ "tfr.keras.losses.RankingLossKey.YETI_LOGISTIC_LOSS": true,
"tfr.keras.losses.RankingLossKey.__eq__": true,
"tfr.keras.losses.RankingLossKey.__ge__": true,
"tfr.keras.losses.RankingLossKey.__gt__": true,
@@ -2499,6 +2545,30 @@
"tfr.keras.losses.UniqueSoftmaxLoss.__new__": true,
"tfr.keras.losses.UniqueSoftmaxLoss.from_config": true,
"tfr.keras.losses.UniqueSoftmaxLoss.get_config": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight": false,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__eq__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__ge__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__gt__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__init__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__le__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__lt__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__ne__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.__new__": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.get_config": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.individual_weights": true,
+ "tfr.keras.losses.YetiDCGLambdaWeight.pair_weights": true,
+ "tfr.keras.losses.YetiLogisticLoss": false,
+ "tfr.keras.losses.YetiLogisticLoss.__call__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__eq__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__ge__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__gt__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__init__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__le__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__lt__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__ne__": true,
+ "tfr.keras.losses.YetiLogisticLoss.__new__": true,
+ "tfr.keras.losses.YetiLogisticLoss.from_config": true,
+ "tfr.keras.losses.YetiLogisticLoss.get_config": true,
"tfr.keras.losses.get": false,
"tfr.keras.metrics": false,
"tfr.keras.metrics.ARPMetric": false,
@@ -2513,7 +2583,6 @@
"tfr.keras.metrics.ARPMetric.__new__": true,
"tfr.keras.metrics.ARPMetric.activity_regularizer": true,
"tfr.keras.metrics.ARPMetric.add_loss": true,
- "tfr.keras.metrics.ARPMetric.add_metric": true,
"tfr.keras.metrics.ARPMetric.build": true,
"tfr.keras.metrics.ARPMetric.build_from_config": true,
"tfr.keras.metrics.ARPMetric.compute_dtype": true,
@@ -2529,6 +2598,7 @@
"tfr.keras.metrics.ARPMetric.get_weights": true,
"tfr.keras.metrics.ARPMetric.input": true,
"tfr.keras.metrics.ARPMetric.input_spec": true,
+ "tfr.keras.metrics.ARPMetric.load_own_variables": true,
"tfr.keras.metrics.ARPMetric.losses": true,
"tfr.keras.metrics.ARPMetric.merge_state": true,
"tfr.keras.metrics.ARPMetric.metrics": true,
@@ -2538,6 +2608,7 @@
"tfr.keras.metrics.ARPMetric.output": true,
"tfr.keras.metrics.ARPMetric.reset_state": true,
"tfr.keras.metrics.ARPMetric.result": true,
+ "tfr.keras.metrics.ARPMetric.save_own_variables": true,
"tfr.keras.metrics.ARPMetric.set_weights": true,
"tfr.keras.metrics.ARPMetric.submodules": true,
"tfr.keras.metrics.ARPMetric.supports_masking": true,
@@ -2559,7 +2630,6 @@
"tfr.keras.metrics.AlphaDCGMetric.__new__": true,
"tfr.keras.metrics.AlphaDCGMetric.activity_regularizer": true,
"tfr.keras.metrics.AlphaDCGMetric.add_loss": true,
- "tfr.keras.metrics.AlphaDCGMetric.add_metric": true,
"tfr.keras.metrics.AlphaDCGMetric.build": true,
"tfr.keras.metrics.AlphaDCGMetric.build_from_config": true,
"tfr.keras.metrics.AlphaDCGMetric.compute_dtype": true,
@@ -2575,6 +2645,7 @@
"tfr.keras.metrics.AlphaDCGMetric.get_weights": true,
"tfr.keras.metrics.AlphaDCGMetric.input": true,
"tfr.keras.metrics.AlphaDCGMetric.input_spec": true,
+ "tfr.keras.metrics.AlphaDCGMetric.load_own_variables": true,
"tfr.keras.metrics.AlphaDCGMetric.losses": true,
"tfr.keras.metrics.AlphaDCGMetric.merge_state": true,
"tfr.keras.metrics.AlphaDCGMetric.metrics": true,
@@ -2584,6 +2655,7 @@
"tfr.keras.metrics.AlphaDCGMetric.output": true,
"tfr.keras.metrics.AlphaDCGMetric.reset_state": true,
"tfr.keras.metrics.AlphaDCGMetric.result": true,
+ "tfr.keras.metrics.AlphaDCGMetric.save_own_variables": true,
"tfr.keras.metrics.AlphaDCGMetric.set_weights": true,
"tfr.keras.metrics.AlphaDCGMetric.submodules": true,
"tfr.keras.metrics.AlphaDCGMetric.supports_masking": true,
@@ -2605,7 +2677,6 @@
"tfr.keras.metrics.DCGMetric.__new__": true,
"tfr.keras.metrics.DCGMetric.activity_regularizer": true,
"tfr.keras.metrics.DCGMetric.add_loss": true,
- "tfr.keras.metrics.DCGMetric.add_metric": true,
"tfr.keras.metrics.DCGMetric.build": true,
"tfr.keras.metrics.DCGMetric.build_from_config": true,
"tfr.keras.metrics.DCGMetric.compute_dtype": true,
@@ -2621,6 +2692,7 @@
"tfr.keras.metrics.DCGMetric.get_weights": true,
"tfr.keras.metrics.DCGMetric.input": true,
"tfr.keras.metrics.DCGMetric.input_spec": true,
+ "tfr.keras.metrics.DCGMetric.load_own_variables": true,
"tfr.keras.metrics.DCGMetric.losses": true,
"tfr.keras.metrics.DCGMetric.merge_state": true,
"tfr.keras.metrics.DCGMetric.metrics": true,
@@ -2630,6 +2702,7 @@
"tfr.keras.metrics.DCGMetric.output": true,
"tfr.keras.metrics.DCGMetric.reset_state": true,
"tfr.keras.metrics.DCGMetric.result": true,
+ "tfr.keras.metrics.DCGMetric.save_own_variables": true,
"tfr.keras.metrics.DCGMetric.set_weights": true,
"tfr.keras.metrics.DCGMetric.submodules": true,
"tfr.keras.metrics.DCGMetric.supports_masking": true,
@@ -2651,7 +2724,6 @@
"tfr.keras.metrics.HitsMetric.__new__": true,
"tfr.keras.metrics.HitsMetric.activity_regularizer": true,
"tfr.keras.metrics.HitsMetric.add_loss": true,
- "tfr.keras.metrics.HitsMetric.add_metric": true,
"tfr.keras.metrics.HitsMetric.build": true,
"tfr.keras.metrics.HitsMetric.build_from_config": true,
"tfr.keras.metrics.HitsMetric.compute_dtype": true,
@@ -2667,6 +2739,7 @@
"tfr.keras.metrics.HitsMetric.get_weights": true,
"tfr.keras.metrics.HitsMetric.input": true,
"tfr.keras.metrics.HitsMetric.input_spec": true,
+ "tfr.keras.metrics.HitsMetric.load_own_variables": true,
"tfr.keras.metrics.HitsMetric.losses": true,
"tfr.keras.metrics.HitsMetric.merge_state": true,
"tfr.keras.metrics.HitsMetric.metrics": true,
@@ -2676,6 +2749,7 @@
"tfr.keras.metrics.HitsMetric.output": true,
"tfr.keras.metrics.HitsMetric.reset_state": true,
"tfr.keras.metrics.HitsMetric.result": true,
+ "tfr.keras.metrics.HitsMetric.save_own_variables": true,
"tfr.keras.metrics.HitsMetric.set_weights": true,
"tfr.keras.metrics.HitsMetric.submodules": true,
"tfr.keras.metrics.HitsMetric.supports_masking": true,
@@ -2697,7 +2771,6 @@
"tfr.keras.metrics.MRRMetric.__new__": true,
"tfr.keras.metrics.MRRMetric.activity_regularizer": true,
"tfr.keras.metrics.MRRMetric.add_loss": true,
- "tfr.keras.metrics.MRRMetric.add_metric": true,
"tfr.keras.metrics.MRRMetric.build": true,
"tfr.keras.metrics.MRRMetric.build_from_config": true,
"tfr.keras.metrics.MRRMetric.compute_dtype": true,
@@ -2713,6 +2786,7 @@
"tfr.keras.metrics.MRRMetric.get_weights": true,
"tfr.keras.metrics.MRRMetric.input": true,
"tfr.keras.metrics.MRRMetric.input_spec": true,
+ "tfr.keras.metrics.MRRMetric.load_own_variables": true,
"tfr.keras.metrics.MRRMetric.losses": true,
"tfr.keras.metrics.MRRMetric.merge_state": true,
"tfr.keras.metrics.MRRMetric.metrics": true,
@@ -2722,6 +2796,7 @@
"tfr.keras.metrics.MRRMetric.output": true,
"tfr.keras.metrics.MRRMetric.reset_state": true,
"tfr.keras.metrics.MRRMetric.result": true,
+ "tfr.keras.metrics.MRRMetric.save_own_variables": true,
"tfr.keras.metrics.MRRMetric.set_weights": true,
"tfr.keras.metrics.MRRMetric.submodules": true,
"tfr.keras.metrics.MRRMetric.supports_masking": true,
@@ -2743,7 +2818,6 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.__new__": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.activity_regularizer": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.add_loss": true,
- "tfr.keras.metrics.MeanAveragePrecisionMetric.add_metric": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.build": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.build_from_config": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.compute_dtype": true,
@@ -2759,6 +2833,7 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.get_weights": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.input": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.input_spec": true,
+ "tfr.keras.metrics.MeanAveragePrecisionMetric.load_own_variables": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.losses": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.merge_state": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.metrics": true,
@@ -2768,6 +2843,7 @@
"tfr.keras.metrics.MeanAveragePrecisionMetric.output": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.reset_state": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.result": true,
+ "tfr.keras.metrics.MeanAveragePrecisionMetric.save_own_variables": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.set_weights": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.submodules": true,
"tfr.keras.metrics.MeanAveragePrecisionMetric.supports_masking": true,
@@ -2789,7 +2865,6 @@
"tfr.keras.metrics.NDCGMetric.__new__": true,
"tfr.keras.metrics.NDCGMetric.activity_regularizer": true,
"tfr.keras.metrics.NDCGMetric.add_loss": true,
- "tfr.keras.metrics.NDCGMetric.add_metric": true,
"tfr.keras.metrics.NDCGMetric.build": true,
"tfr.keras.metrics.NDCGMetric.build_from_config": true,
"tfr.keras.metrics.NDCGMetric.compute_dtype": true,
@@ -2805,6 +2880,7 @@
"tfr.keras.metrics.NDCGMetric.get_weights": true,
"tfr.keras.metrics.NDCGMetric.input": true,
"tfr.keras.metrics.NDCGMetric.input_spec": true,
+ "tfr.keras.metrics.NDCGMetric.load_own_variables": true,
"tfr.keras.metrics.NDCGMetric.losses": true,
"tfr.keras.metrics.NDCGMetric.merge_state": true,
"tfr.keras.metrics.NDCGMetric.metrics": true,
@@ -2814,6 +2890,7 @@
"tfr.keras.metrics.NDCGMetric.output": true,
"tfr.keras.metrics.NDCGMetric.reset_state": true,
"tfr.keras.metrics.NDCGMetric.result": true,
+ "tfr.keras.metrics.NDCGMetric.save_own_variables": true,
"tfr.keras.metrics.NDCGMetric.set_weights": true,
"tfr.keras.metrics.NDCGMetric.submodules": true,
"tfr.keras.metrics.NDCGMetric.supports_masking": true,
@@ -2835,7 +2912,6 @@
"tfr.keras.metrics.OPAMetric.__new__": true,
"tfr.keras.metrics.OPAMetric.activity_regularizer": true,
"tfr.keras.metrics.OPAMetric.add_loss": true,
- "tfr.keras.metrics.OPAMetric.add_metric": true,
"tfr.keras.metrics.OPAMetric.build": true,
"tfr.keras.metrics.OPAMetric.build_from_config": true,
"tfr.keras.metrics.OPAMetric.compute_dtype": true,
@@ -2851,6 +2927,7 @@
"tfr.keras.metrics.OPAMetric.get_weights": true,
"tfr.keras.metrics.OPAMetric.input": true,
"tfr.keras.metrics.OPAMetric.input_spec": true,
+ "tfr.keras.metrics.OPAMetric.load_own_variables": true,
"tfr.keras.metrics.OPAMetric.losses": true,
"tfr.keras.metrics.OPAMetric.merge_state": true,
"tfr.keras.metrics.OPAMetric.metrics": true,
@@ -2860,6 +2937,7 @@
"tfr.keras.metrics.OPAMetric.output": true,
"tfr.keras.metrics.OPAMetric.reset_state": true,
"tfr.keras.metrics.OPAMetric.result": true,
+ "tfr.keras.metrics.OPAMetric.save_own_variables": true,
"tfr.keras.metrics.OPAMetric.set_weights": true,
"tfr.keras.metrics.OPAMetric.submodules": true,
"tfr.keras.metrics.OPAMetric.supports_masking": true,
@@ -2881,7 +2959,6 @@
"tfr.keras.metrics.PrecisionIAMetric.__new__": true,
"tfr.keras.metrics.PrecisionIAMetric.activity_regularizer": true,
"tfr.keras.metrics.PrecisionIAMetric.add_loss": true,
- "tfr.keras.metrics.PrecisionIAMetric.add_metric": true,
"tfr.keras.metrics.PrecisionIAMetric.build": true,
"tfr.keras.metrics.PrecisionIAMetric.build_from_config": true,
"tfr.keras.metrics.PrecisionIAMetric.compute_dtype": true,
@@ -2897,6 +2974,7 @@
"tfr.keras.metrics.PrecisionIAMetric.get_weights": true,
"tfr.keras.metrics.PrecisionIAMetric.input": true,
"tfr.keras.metrics.PrecisionIAMetric.input_spec": true,
+ "tfr.keras.metrics.PrecisionIAMetric.load_own_variables": true,
"tfr.keras.metrics.PrecisionIAMetric.losses": true,
"tfr.keras.metrics.PrecisionIAMetric.merge_state": true,
"tfr.keras.metrics.PrecisionIAMetric.metrics": true,
@@ -2906,6 +2984,7 @@
"tfr.keras.metrics.PrecisionIAMetric.output": true,
"tfr.keras.metrics.PrecisionIAMetric.reset_state": true,
"tfr.keras.metrics.PrecisionIAMetric.result": true,
+ "tfr.keras.metrics.PrecisionIAMetric.save_own_variables": true,
"tfr.keras.metrics.PrecisionIAMetric.set_weights": true,
"tfr.keras.metrics.PrecisionIAMetric.submodules": true,
"tfr.keras.metrics.PrecisionIAMetric.supports_masking": true,
@@ -2927,7 +3006,6 @@
"tfr.keras.metrics.PrecisionMetric.__new__": true,
"tfr.keras.metrics.PrecisionMetric.activity_regularizer": true,
"tfr.keras.metrics.PrecisionMetric.add_loss": true,
- "tfr.keras.metrics.PrecisionMetric.add_metric": true,
"tfr.keras.metrics.PrecisionMetric.build": true,
"tfr.keras.metrics.PrecisionMetric.build_from_config": true,
"tfr.keras.metrics.PrecisionMetric.compute_dtype": true,
@@ -2943,6 +3021,7 @@
"tfr.keras.metrics.PrecisionMetric.get_weights": true,
"tfr.keras.metrics.PrecisionMetric.input": true,
"tfr.keras.metrics.PrecisionMetric.input_spec": true,
+ "tfr.keras.metrics.PrecisionMetric.load_own_variables": true,
"tfr.keras.metrics.PrecisionMetric.losses": true,
"tfr.keras.metrics.PrecisionMetric.merge_state": true,
"tfr.keras.metrics.PrecisionMetric.metrics": true,
@@ -2952,6 +3031,7 @@
"tfr.keras.metrics.PrecisionMetric.output": true,
"tfr.keras.metrics.PrecisionMetric.reset_state": true,
"tfr.keras.metrics.PrecisionMetric.result": true,
+ "tfr.keras.metrics.PrecisionMetric.save_own_variables": true,
"tfr.keras.metrics.PrecisionMetric.set_weights": true,
"tfr.keras.metrics.PrecisionMetric.submodules": true,
"tfr.keras.metrics.PrecisionMetric.supports_masking": true,
@@ -2992,7 +3072,6 @@
"tfr.keras.metrics.RecallMetric.__new__": true,
"tfr.keras.metrics.RecallMetric.activity_regularizer": true,
"tfr.keras.metrics.RecallMetric.add_loss": true,
- "tfr.keras.metrics.RecallMetric.add_metric": true,
"tfr.keras.metrics.RecallMetric.build": true,
"tfr.keras.metrics.RecallMetric.build_from_config": true,
"tfr.keras.metrics.RecallMetric.compute_dtype": true,
@@ -3008,6 +3087,7 @@
"tfr.keras.metrics.RecallMetric.get_weights": true,
"tfr.keras.metrics.RecallMetric.input": true,
"tfr.keras.metrics.RecallMetric.input_spec": true,
+ "tfr.keras.metrics.RecallMetric.load_own_variables": true,
"tfr.keras.metrics.RecallMetric.losses": true,
"tfr.keras.metrics.RecallMetric.merge_state": true,
"tfr.keras.metrics.RecallMetric.metrics": true,
@@ -3017,6 +3097,7 @@
"tfr.keras.metrics.RecallMetric.output": true,
"tfr.keras.metrics.RecallMetric.reset_state": true,
"tfr.keras.metrics.RecallMetric.result": true,
+ "tfr.keras.metrics.RecallMetric.save_own_variables": true,
"tfr.keras.metrics.RecallMetric.set_weights": true,
"tfr.keras.metrics.RecallMetric.submodules": true,
"tfr.keras.metrics.RecallMetric.supports_masking": true,
@@ -3422,11 +3503,13 @@
"tfr.keras.utils.PositiveFunction": false,
"tfr.keras.utils.RankDiscountFunction": false,
"tfr.keras.utils.TensorLike": false,
+ "tfr.keras.utils.deserialize_keras_object": false,
"tfr.keras.utils.identity": false,
"tfr.keras.utils.inverse": false,
"tfr.keras.utils.is_greater_equal_1": false,
"tfr.keras.utils.log2_inverse": false,
"tfr.keras.utils.pow_minus_1": false,
+ "tfr.keras.utils.serialize_keras_object": false,
"tfr.keras.utils.symmetric_log1p": false,
"tfr.utils": false,
"tfr.utils.LossFunction": false,
diff --git a/docs/api_docs/python/tfr/_toc.yaml b/docs/api_docs/python/tfr/_toc.yaml
index 3505887..300540d 100644
--- a/docs/api_docs/python/tfr/_toc.yaml
+++ b/docs/api_docs/python/tfr/_toc.yaml
@@ -153,6 +153,10 @@ toc:
path: /ranking/api_docs/python/tfr/keras/losses/SoftmaxLoss
- title: UniqueSoftmaxLoss
path: /ranking/api_docs/python/tfr/keras/losses/UniqueSoftmaxLoss
+ - title: YetiDCGLambdaWeight
+ path: /ranking/api_docs/python/tfr/keras/losses/YetiDCGLambdaWeight
+ - title: YetiLogisticLoss
+ path: /ranking/api_docs/python/tfr/keras/losses/YetiLogisticLoss
- title: get
path: /ranking/api_docs/python/tfr/keras/losses/get
- title: metrics
@@ -273,6 +277,8 @@ toc:
path: /ranking/api_docs/python/tfr/keras/utils
- title: GainFunction
path: /ranking/api_docs/python/tfr/keras/utils/GainFunction
+ - title: deserialize_keras_object
+ path: /ranking/api_docs/python/tfr/keras/utils/deserialize_keras_object
- title: identity
path: /ranking/api_docs/python/tfr/keras/utils/identity
- title: inverse
@@ -283,6 +289,8 @@ toc:
path: /ranking/api_docs/python/tfr/keras/utils/log2_inverse
- title: pow_minus_1
path: /ranking/api_docs/python/tfr/keras/utils/pow_minus_1
+ - title: serialize_keras_object
+ path: /ranking/api_docs/python/tfr/keras/utils/serialize_keras_object
- title: symmetric_log1p
path: /ranking/api_docs/python/tfr/keras/utils/symmetric_log1p
- title: tfr.utils
diff --git a/docs/api_docs/python/tfr/all_symbols.md b/docs/api_docs/python/tfr/all_symbols.md
index ae880dc..1195512 100644
--- a/docs/api_docs/python/tfr/all_symbols.md
+++ b/docs/api_docs/python/tfr/all_symbols.md
@@ -81,6 +81,8 @@
* tfr.keras.losses.SigmoidCrossEntropyLoss
* tfr.keras.losses.SoftmaxLoss
* tfr.keras.losses.UniqueSoftmaxLoss
+* tfr.keras.losses.YetiDCGLambdaWeight
+* tfr.keras.losses.YetiLogisticLoss
* tfr.keras.losses.get
* tfr.keras.metrics
* tfr.keras.metrics.ARPMetric
@@ -138,11 +140,13 @@
* tfr.keras.utils.PositiveFunction
* tfr.keras.utils.RankDiscountFunction
* tfr.keras.utils.TensorLike
+* tfr.keras.utils.deserialize_keras_object
* tfr.keras.utils.identity
* tfr.keras.utils.inverse
* tfr.keras.utils.is_greater_equal_1
* tfr.keras.utils.log2_inverse
* tfr.keras.utils.pow_minus_1
+* tfr.keras.utils.serialize_keras_object
* tfr.keras.utils.symmetric_log1p
* tfr.utils
* tfr.utils.LossFunction
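The newly exported `tfr.keras.utils.serialize_keras_object` and `tfr.keras.utils.deserialize_keras_object` appear to be thin re-exports of the Keras serialization helpers; a minimal round-trip sketch, assuming that behavior (the `custom_objects` mapping is only needed if the class is not already registered with Keras):

```python
import tensorflow_ranking as tfr

loss = tfr.keras.losses.SoftmaxLoss()
config = tfr.keras.utils.serialize_keras_object(loss)
restored = tfr.keras.utils.deserialize_keras_object(
    config, custom_objects={"SoftmaxLoss": tfr.keras.losses.SoftmaxLoss})
```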
diff --git a/docs/api_docs/python/tfr/api_report.pb b/docs/api_docs/python/tfr/api_report.pb
index c8e1ebe..0fef1f1 100644
Binary files a/docs/api_docs/python/tfr/api_report.pb and b/docs/api_docs/python/tfr/api_report.pb differ
diff --git a/docs/api_docs/python/tfr/data/parse_from_example_in_example.md b/docs/api_docs/python/tfr/data/parse_from_example_in_example.md
index ad0d5d7..3c5cd11 100644
--- a/docs/api_docs/python/tfr/data/parse_from_example_in_example.md
+++ b/docs/api_docs/python/tfr/data/parse_from_example_in_example.md
@@ -146,17 +146,14 @@ And the expected output is:
```python
{
- "unigrams":
- SparseTensor(
- indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2],
- [1, 0, 0], [1, 1, 0], [1, 1, 1]]),
- values=[
- "tensorflow", "learning", "to", "rank", "gbdt", "neural",
- "network"
- ],
- dense_shape=array([2, 2, 3])),
- "utility": [[[0.], [1.]], [[0.], [1.]]],
- "query_length": [[3], [2]],
+ "unigrams": SparseTensor(
+ indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0],
+ [1, 1, 0], [1, 1, 1]]),
+      values=["tensorflow", "learning", "to", "rank", "gbdt", "neural",
+              "network"],
+ dense_shape=array([2, 2, 3])),
+ "utility": [[[ 0.], [ 1.]], [[ 0.], [ 1.]]],
+ "query_length": [[3], [2]],
}
```
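A sketch of the call that would produce a feature dict like the one above; `serialized` stands for a batch of serialized example-in-example protos, and the feature specs are assumptions consistent with the expected output:

```python
import tensorflow as tf
import tensorflow_ranking as tfr

context_feature_spec = {
    "query_length": tf.io.FixedLenFeature([1], tf.int64, default_value=[0]),
}
example_feature_spec = {
    "unigrams": tf.io.VarLenFeature(tf.string),
    "utility": tf.io.FixedLenFeature([1], tf.float32, default_value=[0.0]),
}

features = tfr.data.parse_from_example_in_example(
    serialized,  # batch of serialized example-in-example protos
    context_feature_spec=context_feature_spec,
    example_feature_spec=example_feature_spec,
)
```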
diff --git a/docs/api_docs/python/tfr/data/parse_from_example_list.md b/docs/api_docs/python/tfr/data/parse_from_example_list.md
index 6d61a3e..79c6566 100644
--- a/docs/api_docs/python/tfr/data/parse_from_example_list.md
+++ b/docs/api_docs/python/tfr/data/parse_from_example_list.md
@@ -126,17 +126,14 @@ And the expected output is:
```python
{
- "unigrams":
- SparseTensor(
- indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2],
- [1, 0, 0], [1, 1, 0], [1, 1, 1]]),
- values=[
- "tensorflow", "learning", "to", "rank", "gbdt", "neural",
- "network"
- ],
- dense_shape=array([2, 2, 3])),
- "utility": [[[0.], [1.]], [[0.], [1.]]],
- "query_length": [[3], [2]],
+ "unigrams": SparseTensor(
+ indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0],
+ [1, 1, 0], [1, 1, 1]]),
+      values=["tensorflow", "learning", "to", "rank", "gbdt", "neural",
+              "network"],
+ dense_shape=array([2, 2, 3])),
+ "utility": [[[ 0.], [ 1.]], [[ 0.], [ 1.]]],
+ "query_length": [[3], [2]],
}
```
diff --git a/docs/api_docs/python/tfr/data/parse_from_sequence_example.md b/docs/api_docs/python/tfr/data/parse_from_sequence_example.md
index 4460f39..bf1a3c8 100644
--- a/docs/api_docs/python/tfr/data/parse_from_sequence_example.md
+++ b/docs/api_docs/python/tfr/data/parse_from_sequence_example.md
@@ -119,14 +119,13 @@ And the expected output is:
```python
{
- "unigrams":
- SparseTensor(
- indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2],
- [1, 0, 0], [1, 1, 0], [1, 1, 1]]),
- values=["tensorflow", "learning", "to", "rank", "gbdt"],
- dense_shape=array([2, 2, 3])),
- "utility": [[[0.], [1.]], [[0.], [0.]]],
- "query_length": [[3], [2]],
+ "unigrams": SparseTensor(
+      indices=array([[0, 0, 0], [0, 1, 0], [0, 1, 1], [0, 1, 2], [1, 0, 0],
+                     [1, 1, 0], [1, 1, 1]]),
+ values=["tensorflow", "learning", "to", "rank", "gbdt"],
+ dense_shape=array([2, 2, 3])),
+ "utility": [[[ 0.], [ 1.]], [[ 0.], [ 0.]]],
+ "query_length": [[3], [2]],
}
```
diff --git a/docs/api_docs/python/tfr/extension/premade/TFRBertConfig.md b/docs/api_docs/python/tfr/extension/premade/TFRBertConfig.md
index 87c1a9c..fd482c7 100644
--- a/docs/api_docs/python/tfr/extension/premade/TFRBertConfig.md
+++ b/docs/api_docs/python/tfr/extension/premade/TFRBertConfig.md
@@ -19,6 +19,7 @@ description: The tf-ranking BERT task config.
+
@@ -68,6 +69,7 @@ Inherits From:
validation_data: tfr.extension.task.RankingDataConfig
= None,
name: Optional[str] = None,
differential_privacy_config: Optional[dp_configs.DifferentialPrivacyConfig] = None,
+ allow_image_summary: bool = False,
loss: str = 'softmax_loss',
loss_reduction: str = tf.keras.losses.Reduction.NONE,
aggregated_metrics: bool = False,
@@ -78,7 +80,6 @@ Inherits From:
-
Args |
@@ -105,7 +106,6 @@ operators, including {'==', '!=', '<', '<=', '>', '>='}.
-
Attributes |
@@ -171,6 +171,13 @@ Dataclass field
+`allow_image_summary`
+ |
+
+Dataclass field
+ |
+
+
`loss`
|
@@ -424,6 +431,13 @@ aggregated_metrics
|
+allow_image_summary
+ |
+
+`False`
+ |
+
+
default_params
|
diff --git a/docs/api_docs/python/tfr/extension/premade/TFRBertDataConfig.md b/docs/api_docs/python/tfr/extension/premade/TFRBertDataConfig.md
index e50689c..6671bba 100644
--- a/docs/api_docs/python/tfr/extension/premade/TFRBertDataConfig.md
+++ b/docs/api_docs/python/tfr/extension/premade/TFRBertDataConfig.md
@@ -19,6 +19,7 @@ description: Data config for TFR-BERT task.
+
@@ -107,6 +108,7 @@ Inherits From:
trainer_id: Optional[str] = None,
seed: Optional[int] = None,
prefetch_buffer_size: Optional[int] = None,
+ autotune_algorithm: Optional[str] = None,
data_format: str = tfr_data.ELWC,
dataset_fn: str = 'tfrecord',
list_size: Optional[int] = None,
@@ -122,7 +124,6 @@ Inherits From:
-
Args |
@@ -149,7 +150,6 @@ operators, including {'==', '!=', '<', '<=', '>', '>='}.
-
Attributes |
@@ -334,6 +334,13 @@ Dataclass field
+`autotune_algorithm`
+ |
+
+Dataclass field
+ |
+
+
`data_format`
|
@@ -622,6 +629,13 @@ apply_tf_data_service_before_batching
+ |
+
+`None`
+ |
+
+
block_length
|
diff --git a/docs/api_docs/python/tfr/extension/premade/TFRBertModelConfig.md b/docs/api_docs/python/tfr/extension/premade/TFRBertModelConfig.md
index ba08b3d..d584407 100644
--- a/docs/api_docs/python/tfr/extension/premade/TFRBertModelConfig.md
+++ b/docs/api_docs/python/tfr/extension/premade/TFRBertModelConfig.md
@@ -59,7 +59,6 @@ A TFR-BERT model configuration.
-
Args |
@@ -86,7 +85,6 @@ operators, including {'==', '!=', '<', '<=', '>', '>='}.
-
Attributes |
diff --git a/docs/api_docs/python/tfr/extension/premade/TensorDict.md b/docs/api_docs/python/tfr/extension/premade/TensorDict.md
index 495b2f4..aa049ef 100644
--- a/docs/api_docs/python/tfr/extension/premade/TensorDict.md
+++ b/docs/api_docs/python/tfr/extension/premade/TensorDict.md
@@ -6,7 +6,6 @@
# tfr.extension.premade.TensorDict
-
This symbol is a **type alias**.
#### Source:
diff --git a/docs/api_docs/python/tfr/extension/task/FeatureSpec.md b/docs/api_docs/python/tfr/extension/task/FeatureSpec.md
index 3cf59ae..3574194 100644
--- a/docs/api_docs/python/tfr/extension/task/FeatureSpec.md
+++ b/docs/api_docs/python/tfr/extension/task/FeatureSpec.md
@@ -6,7 +6,6 @@
# tfr.extension.task.FeatureSpec
-
This symbol is a **type alias**.
#### Source:
diff --git a/docs/api_docs/python/tfr/extension/task/RankingDataConfig.md b/docs/api_docs/python/tfr/extension/task/RankingDataConfig.md
index d3ce606..3a84f30 100644
--- a/docs/api_docs/python/tfr/extension/task/RankingDataConfig.md
+++ b/docs/api_docs/python/tfr/extension/task/RankingDataConfig.md
@@ -19,6 +19,7 @@ description: Data set config.
+
@@ -93,6 +94,7 @@ Data set config.
trainer_id: Optional[str] = None,
seed: Optional[int] = None,
prefetch_buffer_size: Optional[int] = None,
+ autotune_algorithm: Optional[str] = None,
data_format: str = tfr_data.ELWC,
dataset_fn: str = 'tfrecord',
list_size: Optional[int] = None,
@@ -105,7 +107,6 @@ Data set config.
-
Args |
@@ -132,7 +133,6 @@ operators, including {'==', '!=', '<', '<=', '>', '>='}.
-
Attributes |
@@ -317,6 +317,13 @@ Dataclass field
+`autotune_algorithm`
+ |
+
+Dataclass field
+ |
+
+
`data_format`
|
@@ -584,6 +591,13 @@ apply_tf_data_service_before_batching
+ |
+
+`None`
+ |
+
+
block_length
|
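The `autotune_algorithm` addition is a plain dataclass field on the data config. A minimal construction sketch, assuming the base `DataConfig` fields (`input_path`, `is_training`, `global_batch_size`) and treating the algorithm string as an illustrative value:

```python
import tensorflow_ranking as tfr

data_config = tfr.extension.task.RankingDataConfig(
    input_path="/path/to/train.tfrecord",  # placeholder path
    is_training=True,
    global_batch_size=32,
    list_size=50,
    autotune_algorithm="STAGE_BASED",  # assumed value; forwarded to tf.data autotuning
)
```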
diff --git a/docs/api_docs/python/tfr/extension/task/RankingTaskConfig.md b/docs/api_docs/python/tfr/extension/task/RankingTaskConfig.md
index 3fd85c5..3b52a78 100644
--- a/docs/api_docs/python/tfr/extension/task/RankingTaskConfig.md
+++ b/docs/api_docs/python/tfr/extension/task/RankingTaskConfig.md
@@ -19,6 +19,7 @@ description: The TF-Ranking task config.
+
@@ -57,6 +58,7 @@ The TF-Ranking task config.
validation_data: tfr.extension.task.RankingDataConfig = None,
name: Optional[str] = None,
differential_privacy_config: Optional[dp_configs.DifferentialPrivacyConfig] = None,
+ allow_image_summary: bool = False,
loss: str = 'softmax_loss',
loss_reduction: str = tf.keras.losses.Reduction.NONE,
aggregated_metrics: bool = False,
@@ -67,7 +69,6 @@ The TF-Ranking task config.
-
Args |
@@ -94,7 +95,6 @@ operators, including {'==', '!=', '<', '<=', '>', '>='}.
-
Attributes |
@@ -160,6 +160,13 @@ Dataclass field
+`allow_image_summary`
+ |
+
+Dataclass field
+ |
+
+
`loss`
|
@@ -413,6 +420,13 @@ aggregated_metrics
|
+allow_image_summary
+ |
+
+`False`
+ |
+
+
default_params
|
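`allow_image_summary` is likewise a simple boolean field with a `False` default. A construction sketch, assuming the `train_data`/`validation_data` field names from the base task config:

```python
import tensorflow_ranking as tfr

task_config = tfr.extension.task.RankingTaskConfig(
    train_data=train_data_config,            # a RankingDataConfig, e.g. as above
    validation_data=validation_data_config,  # another RankingDataConfig
    loss="softmax_loss",
    allow_image_summary=False,               # new flag; matches the default shown above
)
```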
diff --git a/docs/api_docs/python/tfr/keras/layers/Bilinear.md b/docs/api_docs/python/tfr/keras/layers/Bilinear.md
index 08e5288..94e45f1 100644
--- a/docs/api_docs/python/tfr/keras/layers/Bilinear.md
+++ b/docs/api_docs/python/tfr/keras/layers/Bilinear.md
@@ -7,7 +7,6 @@ description: A Keras Layer makes bilinear interaction of two vectors.
-
@@ -17,6 +16,8 @@ description: A Keras Layer makes bilinear interaction of two vectors.
+
+
@@ -187,20 +188,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -347,55 +336,33 @@ Used for backwards compatibility only.
|
-add_metric
+build
+
+View
+source
-add_metric(
- value, name=None, **kwargs
+build(
+ input_shape: tf.TensorShape
)
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
+See tf.keras.layers.Layer.
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
+build_from_config
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
+
+build_from_config(
+ config
+)
+
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
+Builds the layer's states with the supplied config dict.
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
@@ -404,53 +371,14 @@ model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
+`config`
|
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
+Dict containing the input shape associated with this layer.
|
-build
-
-View
-source
-
-
-build(
- input_shape: tf.TensorShape
-)
-
-
-See tf.keras.layers.Layer.
-
-build_from_config
-
-
-build_from_config(
- config
-)
-
-
compute_mask
@@ -594,6 +522,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -655,6 +607,64 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
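The `save_own_variables`/`load_own_variables` hooks documented above give a layer full control over how its state is written to and read from the saving `store`. A minimal sketch of overriding them on a custom layer (the layer and the `"scale"` key are illustrative, not part of TF-Ranking):

```python
import tensorflow as tf

class ScaleLayer(tf.keras.layers.Layer):
  """Toy layer with a single trainable scale variable."""

  def build(self, input_shape):
    self.scale = self.add_weight(name="scale", shape=(), initializer="ones")

  def call(self, inputs):
    return inputs * self.scale

  def save_own_variables(self, store):
    # Invoked by model.save(); the layer chooses the keys it writes.
    store["scale"] = self.scale.numpy()

  def load_own_variables(self, store):
    # Invoked by keras.models.load_model(); must mirror save_own_variables.
    self.scale.assign(store["scale"])
```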
diff --git a/docs/api_docs/python/tfr/keras/layers/ConcatFeatures.md b/docs/api_docs/python/tfr/keras/layers/ConcatFeatures.md
index 13f8a58..21ab943 100644
--- a/docs/api_docs/python/tfr/keras/layers/ConcatFeatures.md
+++ b/docs/api_docs/python/tfr/keras/layers/ConcatFeatures.md
@@ -8,7 +8,6 @@ manner.
-
@@ -18,6 +17,8 @@ manner.
+
+
@@ -213,20 +214,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -373,89 +362,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -498,6 +404,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -674,6 +603,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -758,6 +711,64 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
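`get_build_config()` and `build_from_config()` are a pair: the first records what `build()` needs (by default, just the input shape) and the second re-creates the layer's state from that record before Keras restores the saved weight values. A sketch of overriding both on a custom layer (everything here is illustrative):

```python
import tensorflow as tf

class RunningMax(tf.keras.layers.Layer):
  """Toy layer that creates non-trainable state in build()."""

  def build(self, input_shape):
    self.built_shape = tuple(input_shape)
    self.max_seen = self.add_weight(
        name="max_seen", shape=(self.built_shape[-1],),
        initializer="zeros", trainable=False)

  def call(self, inputs):
    self.max_seen.assign(tf.maximum(self.max_seen, tf.reduce_max(inputs, axis=0)))
    return inputs

  def get_build_config(self):
    # Default behaviour made explicit: record the shape build() was called with.
    return {"input_shape": self.built_shape}

  def build_from_config(self, config):
    # Re-create the state so it exists when Keras loads the saved values.
    self.build(config["input_shape"])
```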
diff --git a/docs/api_docs/python/tfr/keras/layers/DocumentInteractionAttention.md b/docs/api_docs/python/tfr/keras/layers/DocumentInteractionAttention.md
index 10ea7d4..818fa15 100644
--- a/docs/api_docs/python/tfr/keras/layers/DocumentInteractionAttention.md
+++ b/docs/api_docs/python/tfr/keras/layers/DocumentInteractionAttention.md
@@ -7,7 +7,6 @@ description: Cross Document Interaction Attention layer.
-
@@ -17,6 +16,8 @@ description: Cross Document Interaction Attention layer.
+
+
@@ -255,20 +256,8 @@ gradients back to the corresponding variables.
```
|
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -415,89 +404,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
View
@@ -539,6 +445,29 @@ correspond to `inputs` argument of call.
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -715,6 +644,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -799,6 +752,64 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/layers/FlattenList.md b/docs/api_docs/python/tfr/keras/layers/FlattenList.md
index 50f9ec2..b4c39e5 100644
--- a/docs/api_docs/python/tfr/keras/layers/FlattenList.md
+++ b/docs/api_docs/python/tfr/keras/layers/FlattenList.md
@@ -7,7 +7,6 @@ description: Layer to flatten the example list.
-
@@ -17,6 +16,8 @@ description: Layer to flatten the example list.
+
+
@@ -204,20 +205,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -364,89 +353,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -489,6 +395,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -665,6 +594,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -749,6 +702,64 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/layers/GAMLayer.md b/docs/api_docs/python/tfr/keras/layers/GAMLayer.md
index ea947d4..2aee7ba 100644
--- a/docs/api_docs/python/tfr/keras/layers/GAMLayer.md
+++ b/docs/api_docs/python/tfr/keras/layers/GAMLayer.md
@@ -7,7 +7,6 @@ description: Defines a generalized additive model (GAM) layer.
-
@@ -17,6 +16,8 @@ description: Defines a generalized additive model (GAM) layer.
+
+
@@ -298,20 +299,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -458,89 +447,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -583,6 +489,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -759,6 +688,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -843,6 +796,64 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/layers/RestoreList.md b/docs/api_docs/python/tfr/keras/layers/RestoreList.md
index 1ae3e73..150c338 100644
--- a/docs/api_docs/python/tfr/keras/layers/RestoreList.md
+++ b/docs/api_docs/python/tfr/keras/layers/RestoreList.md
@@ -7,7 +7,6 @@ description: Output layer to restore listwise output shape.
-
@@ -17,6 +16,8 @@ description: Output layer to restore listwise output shape.
+
+
@@ -181,20 +182,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -341,89 +330,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -466,6 +372,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -642,6 +571,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -726,6 +679,64 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/layers/SelfAttentionMask.md b/docs/api_docs/python/tfr/keras/layers/SelfAttentionMask.md
index cc4d627..675374f 100644
--- a/docs/api_docs/python/tfr/keras/layers/SelfAttentionMask.md
+++ b/docs/api_docs/python/tfr/keras/layers/SelfAttentionMask.md
@@ -7,7 +7,6 @@ description: Create 3D attention mask from a 2D tensor mask.
-
@@ -17,6 +16,8 @@ description: Create 3D attention mask from a 2D tensor mask.
+
+
@@ -162,20 +163,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -322,89 +311,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -447,6 +353,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -623,6 +552,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
@@ -704,6 +657,64 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/losses.md b/docs/api_docs/python/tfr/keras/losses.md
index 5143f3c..57b64fb 100644
--- a/docs/api_docs/python/tfr/keras/losses.md
+++ b/docs/api_docs/python/tfr/keras/losses.md
@@ -91,6 +91,12 @@ cross-entropy loss between `y_true` and `y_pred`.
[`class UniqueSoftmaxLoss`](../../tfr/keras/losses/UniqueSoftmaxLoss.md):
Computes unique softmax cross-entropy loss between `y_true` and `y_pred`.
+[`class YetiDCGLambdaWeight`](../../tfr/keras/losses/YetiDCGLambdaWeight.md):
+Keras serializable class for YetiDCGLambdaWeight.
+
+[`class YetiLogisticLoss`](../../tfr/keras/losses/YetiLogisticLoss.md): Computes
+Yeti logistic loss between `y_true` and `y_pred`.
+
## Functions
[`get(...)`](../../tfr/keras/losses/get.md): Factory method to get a ranking
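Both new classes follow the construction pattern of the existing ranking losses, and the factory continues to accept registered string keys. A short sketch (default-constructibility of `YetiLogisticLoss` and the exact factory keys are assumptions):

```python
import tensorflow_ranking as tfr

yeti_loss = tfr.keras.losses.YetiLogisticLoss()      # constructor args assumed to mirror other losses
softmax_loss = tfr.keras.losses.get("softmax_loss")  # factory lookup by registered key
```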
diff --git a/docs/api_docs/python/tfr/keras/losses/ApproxMRRLoss.md b/docs/api_docs/python/tfr/keras/losses/ApproxMRRLoss.md
index 309f213..1460050 100644
--- a/docs/api_docs/python/tfr/keras/losses/ApproxMRRLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/ApproxMRRLoss.md
@@ -15,7 +15,7 @@ description: Computes approximate MRR loss between y_true and y_pred.
-
+
View source on GitHub
@@ -113,13 +113,14 @@ Retrieval Measures, Qin et al, 2008][qin2008]
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -135,7 +136,7 @@ Optional name for the instance.
from_config
-View
+View
source
@@ -176,7 +177,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -187,7 +188,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
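The reworded `reduction` note is easier to read against a concrete call: construct the loss with an explicit reduction and apply it to listwise tensors of shape `[batch_size, list_size]` (values below are illustrative):

```python
import tensorflow as tf
import tensorflow_ranking as tfr

loss = tfr.keras.losses.ApproxMRRLoss(
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)

y_true = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
y_pred = tf.constant([[0.8, 0.2, 0.1], [0.5, 0.1, 0.9]])
print(loss(y_true, y_pred).numpy())

# Per the note above, in a custom tf.distribute training loop use
# Reduction.SUM or Reduction.NONE instead of AUTO/SUM_OVER_BATCH_SIZE.
```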
diff --git a/docs/api_docs/python/tfr/keras/losses/ApproxNDCGLoss.md b/docs/api_docs/python/tfr/keras/losses/ApproxNDCGLoss.md
index e431c60..9a0c16d 100644
--- a/docs/api_docs/python/tfr/keras/losses/ApproxNDCGLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/ApproxNDCGLoss.md
@@ -15,7 +15,7 @@ description: Computes approximate NDCG loss between y_true and y_pred.
-
+
View source on GitHub
@@ -117,13 +117,14 @@ $$
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -139,7 +140,7 @@ Optional name for the instance.
from_config
-View
+View
source
@@ -180,7 +181,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -191,7 +192,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/ClickEMLoss.md b/docs/api_docs/python/tfr/keras/losses/ClickEMLoss.md
index 3659d92..4094e38 100644
--- a/docs/api_docs/python/tfr/keras/losses/ClickEMLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/ClickEMLoss.md
@@ -15,7 +15,7 @@ description: Computes click EM loss between y_true and y_pred.
-
+
View source on GitHub
@@ -173,7 +173,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -184,7 +184,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/CoupledRankDistilLoss.md b/docs/api_docs/python/tfr/keras/losses/CoupledRankDistilLoss.md
index 38072f9..c1a6fb3 100644
--- a/docs/api_docs/python/tfr/keras/losses/CoupledRankDistilLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/CoupledRankDistilLoss.md
@@ -15,7 +15,7 @@ description: Computes the Rank Distil loss between y_true and y_pred.
-
+
View source on GitHub
@@ -86,7 +86,6 @@ The Coupled-RankDistil loss is defined as: $$ \mathcal{L}(y, s) = -\sum_{\pi}
[reddi2021]: https://research.google/pubs/pub50695/
-
Args |
@@ -156,7 +155,6 @@ all entries in the list.
Instantiates a `Loss` from its config (output of `get_config()`).
-
Args |
@@ -172,7 +170,6 @@ Output of `get_config()`.
-
Returns |
@@ -186,7 +183,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -197,7 +194,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/DCGLambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/DCGLambdaWeight.md
index de99c5a..f66eadf 100644
--- a/docs/api_docs/python/tfr/keras/losses/DCGLambdaWeight.md
+++ b/docs/api_docs/python/tfr/keras/losses/DCGLambdaWeight.md
@@ -15,7 +15,7 @@ description: Keras serializable class for DCG.
-
+
View source on GitHub
@@ -85,7 +85,7 @@ LambdaMART.
get_config
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/GumbelApproxNDCGLoss.md b/docs/api_docs/python/tfr/keras/losses/GumbelApproxNDCGLoss.md
index a42c7bd..c9310f2 100644
--- a/docs/api_docs/python/tfr/keras/losses/GumbelApproxNDCGLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/GumbelApproxNDCGLoss.md
@@ -16,7 +16,7 @@ y_pred.
-
+
View source on GitHub
@@ -125,13 +125,14 @@ $$
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -147,7 +148,7 @@ Optional name for the instance.
from_config
-View
+View
source
@@ -188,7 +189,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -199,7 +200,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/LabelDiffLambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/LabelDiffLambdaWeight.md
index 62ac7d5..23716d7 100644
--- a/docs/api_docs/python/tfr/keras/losses/LabelDiffLambdaWeight.md
+++ b/docs/api_docs/python/tfr/keras/losses/LabelDiffLambdaWeight.md
@@ -15,7 +15,7 @@ description: Keras serializable class for LabelDiffLambdaWeight.
-
+
View source on GitHub
@@ -36,7 +36,7 @@ Keras serializable class for LabelDiffLambdaWeight.
get_config
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/ListMLELambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/ListMLELambdaWeight.md
index f3a7f05..aea61de 100644
--- a/docs/api_docs/python/tfr/keras/losses/ListMLELambdaWeight.md
+++ b/docs/api_docs/python/tfr/keras/losses/ListMLELambdaWeight.md
@@ -15,7 +15,7 @@ description: LambdaWeight for ListMLE cost function.
-
+
View source on GitHub
@@ -52,7 +52,7 @@ LambdaWeight for ListMLE cost function.
get_config
-View
+View
source
@@ -61,7 +61,7 @@ source
individual_weights
-View
+View
source
@@ -74,7 +74,7 @@ See `_LambdaWeight`.
pair_weights
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/ListMLELoss.md b/docs/api_docs/python/tfr/keras/losses/ListMLELoss.md
index bba6532..eda113e 100644
--- a/docs/api_docs/python/tfr/keras/losses/ListMLELoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/ListMLELoss.md
@@ -15,7 +15,7 @@ description: Computes ListMLE loss between y_true and y_pred.
-
+
View source on GitHub
@@ -153,7 +153,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -194,7 +194,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -205,7 +205,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/MeanSquaredLoss.md b/docs/api_docs/python/tfr/keras/losses/MeanSquaredLoss.md
index d38cb25..a3a9a99 100644
--- a/docs/api_docs/python/tfr/keras/losses/MeanSquaredLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/MeanSquaredLoss.md
@@ -15,7 +15,7 @@ description: Computes mean squared loss between y_true and y_pred.
-
+
View source on GitHub
@@ -142,7 +142,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -153,7 +153,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/MixtureEMLoss.md b/docs/api_docs/python/tfr/keras/losses/MixtureEMLoss.md
index 6a77a62..55891f9 100644
--- a/docs/api_docs/python/tfr/keras/losses/MixtureEMLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/MixtureEMLoss.md
@@ -15,7 +15,7 @@ description: Computes mixture EM loss between y_true and y_pred.
-
+
View source on GitHub
@@ -145,7 +145,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -186,7 +186,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -197,7 +197,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeight.md
index 7f9b7f3..83b096b 100644
--- a/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeight.md
+++ b/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeight.md
@@ -15,7 +15,7 @@ description: Keras serializable class for NDCG.
-
+
View source on GitHub
@@ -86,7 +86,7 @@ LambdaMART.
get_config
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeightV2.md b/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeightV2.md
index 6a499ba..c75e7ac 100644
--- a/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeightV2.md
+++ b/docs/api_docs/python/tfr/keras/losses/NDCGLambdaWeightV2.md
@@ -15,7 +15,7 @@ description: Keras serializable class for NDCG LambdaWeight V2 for topn.
-
+
View source on GitHub
@@ -75,7 +75,7 @@ Keras serializable class for NDCG LambdaWeight V2 for topn.
get_config
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/OrdinalLoss.md b/docs/api_docs/python/tfr/keras/losses/OrdinalLoss.md
index d225493..8e443c9 100644
--- a/docs/api_docs/python/tfr/keras/losses/OrdinalLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/OrdinalLoss.md
@@ -15,7 +15,7 @@ description: Computes the Ordinal loss between y_true and y_pred.
-
+
View source on GitHub
@@ -85,13 +85,14 @@ $$
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -145,7 +146,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -156,7 +157,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/PairwiseHingeLoss.md b/docs/api_docs/python/tfr/keras/losses/PairwiseHingeLoss.md
index 2e168c1..9b384ff 100644
--- a/docs/api_docs/python/tfr/keras/losses/PairwiseHingeLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/PairwiseHingeLoss.md
@@ -15,7 +15,7 @@ description: Computes pairwise hinge loss between y_true and y_pred.
-
+
View source on GitHub
@@ -126,7 +126,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -167,7 +167,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -178,7 +178,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/PairwiseLogisticLoss.md b/docs/api_docs/python/tfr/keras/losses/PairwiseLogisticLoss.md
index a640d3f..0416c0a 100644
--- a/docs/api_docs/python/tfr/keras/losses/PairwiseLogisticLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/PairwiseLogisticLoss.md
@@ -15,7 +15,7 @@ description: Computes pairwise logistic loss between y_true and y_pred.
-
+
View source on GitHub
@@ -126,7 +126,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -167,7 +167,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -178,7 +178,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/PairwiseMSELoss.md b/docs/api_docs/python/tfr/keras/losses/PairwiseMSELoss.md
index 9eb9c33..9744ee4 100644
--- a/docs/api_docs/python/tfr/keras/losses/PairwiseMSELoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/PairwiseMSELoss.md
@@ -16,7 +16,7 @@ y_pred.
-
+
View source on GitHub
@@ -128,7 +128,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -169,7 +169,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -180,7 +180,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/PairwiseSoftZeroOneLoss.md b/docs/api_docs/python/tfr/keras/losses/PairwiseSoftZeroOneLoss.md
index f0af422..d6822a2 100644
--- a/docs/api_docs/python/tfr/keras/losses/PairwiseSoftZeroOneLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/PairwiseSoftZeroOneLoss.md
@@ -15,7 +15,7 @@ description: Computes pairwise soft zero-one loss between y_true and y_pred.
-
+
View source on GitHub
@@ -127,7 +127,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -168,7 +168,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -179,7 +179,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/PrecisionLambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/PrecisionLambdaWeight.md
index 167bed0..16361ca 100644
--- a/docs/api_docs/python/tfr/keras/losses/PrecisionLambdaWeight.md
+++ b/docs/api_docs/python/tfr/keras/losses/PrecisionLambdaWeight.md
@@ -15,7 +15,7 @@ description: Keras serializable class for Precision.
-
+
View source on GitHub
@@ -61,7 +61,7 @@ for positive examples. The rest are negative examples.
get_config
-View
+View
source
@@ -118,7 +118,7 @@ A `Tensor` that can weight individual examples.
pair_weights
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/RankingLossKey.md b/docs/api_docs/python/tfr/keras/losses/RankingLossKey.md
index 9ffc302..8da9d24 100644
--- a/docs/api_docs/python/tfr/keras/losses/RankingLossKey.md
+++ b/docs/api_docs/python/tfr/keras/losses/RankingLossKey.md
@@ -18,6 +18,7 @@ description: Ranking loss key strings.
+
# tfr.keras.losses.RankingLossKey
@@ -26,7 +27,7 @@ description: Ranking loss key strings.
-
+
View source on GitHub
@@ -41,7 +42,7 @@ Ranking loss key strings.
all_keys
-View
+View
source
@@ -152,5 +153,12 @@ UNIQUE_SOFTMAX_LOSS
`'unique_softmax_loss'`
|
+
+
+YETI_LOGISTIC_LOSS
+ |
+
+`'yeti_logistic_loss'`
+ |
|
diff --git a/docs/api_docs/python/tfr/keras/losses/SigmoidCrossEntropyLoss.md b/docs/api_docs/python/tfr/keras/losses/SigmoidCrossEntropyLoss.md
index 768ad2d..ee1cb37 100644
--- a/docs/api_docs/python/tfr/keras/losses/SigmoidCrossEntropyLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/SigmoidCrossEntropyLoss.md
@@ -15,7 +15,7 @@ description: Computes the Sigmoid cross-entropy loss between y_true and y_pred.
-
+
View source on GitHub
@@ -85,13 +85,14 @@ $$
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -145,7 +146,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -156,7 +157,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/SoftmaxLoss.md b/docs/api_docs/python/tfr/keras/losses/SoftmaxLoss.md
index 8f25a30..e05b23f 100644
--- a/docs/api_docs/python/tfr/keras/losses/SoftmaxLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/SoftmaxLoss.md
@@ -15,7 +15,7 @@ description: Computes Softmax cross-entropy loss between y_true and y_pred.
-
+
View source on GitHub
@@ -126,7 +126,7 @@ False, this loss will accept dense tensors.
from_config
-View
+View
source
@@ -167,7 +167,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -178,7 +178,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/UniqueSoftmaxLoss.md b/docs/api_docs/python/tfr/keras/losses/UniqueSoftmaxLoss.md
index 9b8bfc8..7e1d7ec 100644
--- a/docs/api_docs/python/tfr/keras/losses/UniqueSoftmaxLoss.md
+++ b/docs/api_docs/python/tfr/keras/losses/UniqueSoftmaxLoss.md
@@ -16,7 +16,7 @@ y_pred.
-
+
View source on GitHub
@@ -104,13 +104,14 @@ $$
|
Type of `tf.keras.losses.Reduction` to apply to
-loss. Default value is `AUTO`. `AUTO` indicates that the reduction
-option will be determined by the usage context. For almost all cases
-this defaults to `SUM_OVER_BATCH_SIZE`. When used under a
-`tf.distribute.Strategy`, except via `Model.compile()` and
-`Model.fit()`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
-will raise an error. Please see this custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
- for more details.
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
|
@@ -126,7 +127,7 @@ Optional name for the instance.
from_config
-View
+View
source
@@ -167,7 +168,7 @@ A `Loss` instance.
get_config
-View
+View
source
@@ -178,7 +179,7 @@ Returns the config dictionary for a `Loss` instance.
__call__
-View
+View
source
diff --git a/docs/api_docs/python/tfr/keras/losses/YetiDCGLambdaWeight.md b/docs/api_docs/python/tfr/keras/losses/YetiDCGLambdaWeight.md
new file mode 100644
index 0000000..eb0cba8
--- /dev/null
+++ b/docs/api_docs/python/tfr/keras/losses/YetiDCGLambdaWeight.md
@@ -0,0 +1,110 @@
+description: Keras serializable class for YetiDCGLambdaWeight.
+
+
+
+
+
+
+
+
+
+
+# tfr.keras.losses.YetiDCGLambdaWeight
+
+
+
+
+
+Keras serializable class for YetiDCGLambdaWeight.
+
+
+tfr.keras.losses.YetiDCGLambdaWeight(
+    topn: Optional[int] = None,
+    gain_fn: Optional[tfr.keras.utils.GainFunction] = None,
+    rank_discount_fn: Optional[tfr.keras.utils.GainFunction] = None,
+    normalized: bool = False,
+    **kwargs
+)
+
+
+
+
+
+
+
+Args |
+
+
+
+`topn`
+ |
+
+(int) The topn for the DCG metric.
+ |
+
+
+`gain_fn`
+ |
+
+(function) Transforms labels.
+ |
+
+
+`rank_discount_fn`
+ |
+
+(function) The rank discount function.
+ |
+
+
+`normalized`
+ |
+
+(bool) If True, normalize weight by the max DCG.
+ |
+
+
+
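+A minimal construction sketch (not part of the generated docs; it assumes the
+standard gain/discount helpers `pow_minus_1` and `log2_inverse` in
+`tfr.keras.utils`, and the argument values are illustrative, not defaults):
+
+```python
+import tensorflow_ranking as tfr
+
+# Keep only the top 10 positions and normalize by the max DCG.
+lambda_weight = tfr.keras.losses.YetiDCGLambdaWeight(
+    topn=10,
+    gain_fn=tfr.keras.utils.pow_minus_1,            # gain: 2^label - 1
+    rank_discount_fn=tfr.keras.utils.log2_inverse,  # discount: 1 / log2(rank + 1)
+    normalized=True)
+```
+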
+## Methods
+
+get_config
+
+View
+source
+
+
+get_config() -> Dict[str, Any]
+
+
+individual_weights
+
+View
+source
+
+
+individual_weights(
+ labels, ranks
+)
+
+
+See `_LambdaWeight`.
+
+pair_weights
+
+View
+source
+
+
+pair_weights(
+ labels: tf.Tensor, ranks: tf.Tensor
+) -> tf.Tensor
+
+
+See `_LambdaWeight`.
diff --git a/docs/api_docs/python/tfr/keras/losses/YetiLogisticLoss.md b/docs/api_docs/python/tfr/keras/losses/YetiLogisticLoss.md
new file mode 100644
index 0000000..ab094cd
--- /dev/null
+++ b/docs/api_docs/python/tfr/keras/losses/YetiLogisticLoss.md
@@ -0,0 +1,204 @@
+description: Computes Yeti logistic loss between y_true and y_pred.
+
+
+
+
+
+
+
+
+
+
+# tfr.keras.losses.YetiLogisticLoss
+
+
+
+
+
+Computes Yeti logistic loss between `y_true` and `y_pred`.
+
+
+tfr.keras.losses.YetiLogisticLoss(
+    reduction: tf.losses.Reduction = tf.losses.Reduction.AUTO,
+    name: Optional[str] = None,
+    lambda_weight: Optional[tfr.keras.losses.YetiDCGLambdaWeight] = None,
+    temperature: float = 0.1,
+    sample_size: int = 8,
+    gumbel_temperature: float = 1.0,
+    seed: Optional[int] = None,
+    ragged: bool = False
+)
+
+
+
+
+Adapted to neural network models from the Yeti loss implementation for GBDT in
+([Lyzhin et al, 2022][lyzhin2022]).
+
+In this code base, the Yeti loss supports a DCG lambda weight option. By
+default, `YetiDCGLambdaWeight` is used with its default settings. To customize
+it, pass a `YetiDCGLambdaWeight` instance as `lambda_weight`, as in the sketch
+below.
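+
+A minimal sketch of that customization (the argument values are illustrative,
+not library defaults):
+
+```python
+lambda_weight = tfr.keras.losses.YetiDCGLambdaWeight(topn=10, normalized=True)
+loss = tfr.keras.losses.YetiLogisticLoss(lambda_weight=lambda_weight)
+```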
+
+For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
+
+```
+loss = sum_a sum_i I[y_i > y_{i\pm 1}] * log(1 + exp(-(s^a_i - s^a_{i\pm 1})))
+```
+
+where `s^a_i = s_i + gumbel(0, 1)^a`, i.e. score `s_i` perturbed by the `a`-th
+sample of Gumbel(0, 1) noise.
+
+#### Standalone usage:
+
+```
+>>> y_true = [[1., 0.]]
+>>> y_pred = [[0.6, 0.8]]
+>>> loss = tfr.keras.losses.YetiLogisticLoss(sample_size=2, seed=1)
+>>> loss(y_true, y_pred).numpy()
+0.90761846
+```
+
+```
+>>> # Using ragged tensors
+>>> y_true = tf.ragged.constant([[1., 0.], [0., 1., 0.]])
+>>> y_pred = tf.ragged.constant([[0.6, 0.8], [0.5, 0.8, 0.4]])
+>>> loss = tfr.keras.losses.YetiLogisticLoss(seed=1, ragged=True)
+>>> loss(y_true, y_pred).numpy()
+0.43420443
+```
+
+Usage with the `compile()` API:
+
+```python
+model.compile(optimizer='sgd', loss=tfr.keras.losses.YetiLogisticLoss())
+```
+
+#### Definition:
+
+$$
+\mathcal{L}(\{y\}, \{s\}) =
+\sum_a \sum_i \sum_{j=i\pm 1}I[y_i > y_j] \log(1 + \exp(-(s^a_i - s^a_j)))
+$$
+
+
+
+
+
+References |
+
+
+- [Which Tricks are Important for Learning to Rank?, Lyzhin et al, 2022][lyzhin2022]
+ |
+
+
+
+
+[lyzhin2022]: https://arxiv.org/abs/2204.01500
+
+
+
+
+
+Args |
+
+
+
+`reduction`
+ |
+
+Type of `tf.keras.losses.Reduction` to apply to
+loss. Default value is `AUTO`. `AUTO` indicates that the
+reduction option will be determined by the usage context. For
+almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
+used under a `tf.distribute.Strategy`, except via
+`Model.compile()` and `Model.fit()`, using `AUTO` or
+`SUM_OVER_BATCH_SIZE` will raise an error. Please see this
+custom training [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
+for more details.
+ |
+
+
+`name`
+ |
+
+Optional name for the instance.
+ |
+
+
+
+## Methods
+
+from_config
+
+View
+source
+
+
+@classmethod
+from_config(
+ config, custom_objects=None
+)
+
+
+Instantiates a `Loss` from its config (output of `get_config()`).
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Output of `get_config()`.
+ |
+
+
+
+
+
+
+
+Returns |
+
+
+A `Loss` instance.
+ |
+
+
+
+
+get_config
+
+View
+source
+
+
+get_config() -> Dict[str, Any]
+
+
+Returns the config dictionary for a `Loss` instance.
+
+__call__
+
+View
+source
+
+
+__call__(
+    y_true: tfr.keras.model.TensorLike,
+    y_pred: tfr.keras.model.TensorLike,
+    sample_weight: Optional[utils.TensorLike] = None
+) -> tf.Tensor
+
+
+See `_RankingLoss`.
diff --git a/docs/api_docs/python/tfr/keras/losses/get.md b/docs/api_docs/python/tfr/keras/losses/get.md
index f1ac68a..29c5d0e 100644
--- a/docs/api_docs/python/tfr/keras/losses/get.md
+++ b/docs/api_docs/python/tfr/keras/losses/get.md
@@ -11,7 +11,7 @@ description: Factory method to get a ranking loss class.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/metrics/ARPMetric.md b/docs/api_docs/python/tfr/keras/metrics/ARPMetric.md
index 70738bb..c9d9334 100644
--- a/docs/api_docs/python/tfr/keras/metrics/ARPMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/ARPMetric.md
@@ -7,7 +7,6 @@ description: Average relevance position (ARP).
-
@@ -17,9 +16,11 @@ description: Average relevance position (ARP).
+
+
@@ -192,20 +193,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -352,89 +341,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -477,6 +383,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -653,6 +582,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
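+A minimal sketch of how `get_build_config()` pairs with `build_from_config()`
+(illustrative only; the exact contents of the config dict may vary across
+Keras versions):
+
+```python
+import tensorflow as tf
+
+layer = tf.keras.layers.Dense(2)
+layer.build((None, 3))              # creates the kernel and bias
+
+config = layer.get_build_config()   # roughly {"input_shape": (None, 3)}
+
+restored = tf.keras.layers.Dense(2)
+restored.build_from_config(config)  # rebuilds weights of the same shape
+```
+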
get_config
View
@@ -714,6 +667,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -814,6 +796,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
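+A minimal sketch of overriding `save_own_variables` and `load_own_variables`
+in a custom layer (illustrative only; the layer and the `"scale"` key are
+hypothetical, not part of this API):
+
+```python
+import tensorflow as tf
+
+class ScaleLayer(tf.keras.layers.Layer):
+  """Toy layer with a single weight, used only to illustrate the two hooks."""
+
+  def build(self, input_shape):
+    self.scale = self.add_weight(name="scale", shape=(), initializer="ones")
+
+  def call(self, inputs):
+    return inputs * self.scale
+
+  def save_own_variables(self, store):
+    # The keys written here are arbitrary strings chosen by the layer.
+    store["scale"] = self.scale.numpy()
+
+  def load_own_variables(self, store):
+    self.scale.assign(store["scale"])
+```
+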
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/AlphaDCGMetric.md b/docs/api_docs/python/tfr/keras/metrics/AlphaDCGMetric.md
index 6cc6598..7ed1ff5 100644
--- a/docs/api_docs/python/tfr/keras/metrics/AlphaDCGMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/AlphaDCGMetric.md
@@ -7,7 +7,6 @@ description: Alpha discounted cumulative gain (alphaDCG).
-
@@ -17,9 +16,11 @@ description: Alpha discounted cumulative gain (alphaDCG).
+
+
@@ -309,20 +310,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -469,89 +458,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -594,6 +500,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -770,6 +699,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -831,6 +784,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -931,6 +913,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/DCGMetric.md b/docs/api_docs/python/tfr/keras/metrics/DCGMetric.md
index 84ae3da..5b18b83 100644
--- a/docs/api_docs/python/tfr/keras/metrics/DCGMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/DCGMetric.md
@@ -7,7 +7,6 @@ description: Discounted cumulative gain (DCG).
-
@@ -17,9 +16,11 @@ description: Discounted cumulative gain (DCG).
+
+
@@ -222,20 +223,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -382,89 +371,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -507,6 +413,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -683,6 +612,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -744,6 +697,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -844,6 +826,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/HitsMetric.md b/docs/api_docs/python/tfr/keras/metrics/HitsMetric.md
index 23d59c1..643c92f 100644
--- a/docs/api_docs/python/tfr/keras/metrics/HitsMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/HitsMetric.md
@@ -7,7 +7,6 @@ description: Hits@k metric.
-
@@ -17,9 +16,11 @@ description: Hits@k metric.
+
+
@@ -202,20 +203,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -362,89 +351,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -487,6 +393,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -663,6 +592,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -724,6 +677,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -824,6 +806,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/MRRMetric.md b/docs/api_docs/python/tfr/keras/metrics/MRRMetric.md
index 4bd26c1..41b4926 100644
--- a/docs/api_docs/python/tfr/keras/metrics/MRRMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/MRRMetric.md
@@ -7,7 +7,6 @@ description: Mean reciprocal rank (MRR).
-
@@ -17,9 +16,11 @@ description: Mean reciprocal rank (MRR).
+
+
@@ -201,20 +202,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -361,89 +350,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -486,6 +392,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -662,6 +591,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -723,6 +676,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -823,6 +805,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/MeanAveragePrecisionMetric.md b/docs/api_docs/python/tfr/keras/metrics/MeanAveragePrecisionMetric.md
index 7b3bd63..b1e4671 100644
--- a/docs/api_docs/python/tfr/keras/metrics/MeanAveragePrecisionMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/MeanAveragePrecisionMetric.md
@@ -7,7 +7,6 @@ description: Mean average precision (MAP).
-
@@ -17,9 +16,11 @@ description: Mean average precision (MAP).
+
+
@@ -209,20 +210,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -369,89 +358,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -494,6 +400,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -670,6 +599,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -731,6 +684,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -831,6 +813,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/NDCGMetric.md b/docs/api_docs/python/tfr/keras/metrics/NDCGMetric.md
index 92f088a..c9c52ad 100644
--- a/docs/api_docs/python/tfr/keras/metrics/NDCGMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/NDCGMetric.md
@@ -7,7 +7,6 @@ description: Normalized discounted cumulative gain (NDCG).
-
@@ -17,9 +16,11 @@ description: Normalized discounted cumulative gain (NDCG).
+
+
@@ -227,20 +228,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -387,89 +376,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -512,6 +418,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -688,6 +617,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -749,6 +702,35 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -849,6 +831,35 @@ A scalar tensor, or a dictionary of scalar tensors.
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/OPAMetric.md b/docs/api_docs/python/tfr/keras/metrics/OPAMetric.md
index 9977e47..7719964 100644
--- a/docs/api_docs/python/tfr/keras/metrics/OPAMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/OPAMetric.md
@@ -7,7 +7,6 @@ description: Ordered pair accuracy (OPA).
-
@@ -17,9 +16,11 @@ description: Ordered pair accuracy (OPA).
+
+
@@ -200,20 +201,8 @@ gradients back to the corresponding variables.
```
| `metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- | `name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. | `name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -360,89 +349,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -485,6 +391,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -661,6 +590,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -722,6 +675,35 @@ Weights values as a list of NumPy arrays.
|
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -822,6 +804,35 @@ A scalar tensor, or a dictionary of scalar tensors.
|
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/PrecisionIAMetric.md b/docs/api_docs/python/tfr/keras/metrics/PrecisionIAMetric.md
index 8ab15f2..00f6204 100644
--- a/docs/api_docs/python/tfr/keras/metrics/PrecisionIAMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/PrecisionIAMetric.md
@@ -7,7 +7,6 @@ description: Precision-IA@k (Pre-IA@k).
-
@@ -17,9 +16,11 @@ description: Precision-IA@k (Pre-IA@k).
+
+
@@ -266,20 +267,8 @@ gradients back to the corresponding variables.
```
|
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -426,89 +415,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -551,6 +457,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -727,6 +656,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -788,6 +741,35 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -888,6 +870,35 @@ A scalar tensor, or a dictionary of scalar tensors.
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/PrecisionMetric.md b/docs/api_docs/python/tfr/keras/metrics/PrecisionMetric.md
index acc0da2..83f97d1 100644
--- a/docs/api_docs/python/tfr/keras/metrics/PrecisionMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/PrecisionMetric.md
@@ -7,7 +7,6 @@ description: Precision@k (P@k).
-
@@ -17,9 +16,11 @@ description: Precision@k (P@k).
+
+
@@ -204,20 +205,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -364,89 +353,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -489,6 +395,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -665,6 +594,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -726,6 +679,35 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -826,6 +808,35 @@ A scalar tensor, or a dictionary of scalar tensors.
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/metrics/RecallMetric.md b/docs/api_docs/python/tfr/keras/metrics/RecallMetric.md
index 088eae0..47f76a4 100644
--- a/docs/api_docs/python/tfr/keras/metrics/RecallMetric.md
+++ b/docs/api_docs/python/tfr/keras/metrics/RecallMetric.md
@@ -7,7 +7,6 @@ description: Recall@k (R@k).
-
@@ -17,9 +16,11 @@ description: Recall@k (R@k).
+
+
@@ -204,20 +205,8 @@ gradients back to the corresponding variables.
```
`metrics` | List of metrics
-added using the `add_metric()` API.
-
-```
->>> input = tf.keras.layers.Input(shape=(3,))
->>> d = tf.keras.layers.Dense(2)
->>> output = d(input)
->>> d.add_metric(tf.reduce_max(output), name='max')
->>> d.add_metric(tf.reduce_min(output), name='min')
->>> [m.name for m in d.metrics]
-['max', 'min']
-```
-
- |
`name` | Name of the layer
-(string), set in the constructor. |
+attached to the layer. |
`name` |
+Name of the layer (string), set in the constructor. |
`name_scope` | Returns a `tf.name_scope`
instance for this class. |
`non_trainable_weights` | List of all
@@ -364,89 +353,6 @@ Used for backwards compatibility only.
|
-add_metric
-
-
-add_metric(
- value, name=None, **kwargs
-)
-
-
-Adds metric tensor to the layer.
-
-This method can be used inside the `call()` method of a subclassed layer or
-model.
-
-```python
-class MyMetricLayer(tf.keras.layers.Layer):
- def __init__(self):
- super(MyMetricLayer, self).__init__(name='my_metric_layer')
- self.mean = tf.keras.metrics.Mean(name='metric_1')
-
- def call(self, inputs):
- self.add_metric(self.mean(inputs))
- self.add_metric(tf.reduce_sum(inputs), name='metric_2')
- return inputs
-```
-
-This method can also be called directly on a Functional Model during
-construction. In this case, any tensor passed to this Model must be symbolic and
-be able to be traced back to the model's `Input`s. These metrics become part of
-the model's topology and are tracked when you save the model via `save()`.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(math_ops.reduce_sum(x), name='metric_1')
-```
-
-Note: Calling `add_metric()` with the result of a metric object on a Functional
-Model, as shown in the example below, is not supported. This is because we
-cannot trace the metric result tensor back to the model's inputs.
-
-```python
-inputs = tf.keras.Input(shape=(10,))
-x = tf.keras.layers.Dense(10)(inputs)
-outputs = tf.keras.layers.Dense(1)(x)
-model = tf.keras.Model(inputs, outputs)
-model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
-```
-
-
-
-
-Args |
-
-
-
-`value`
- |
-
-Metric tensor.
- |
-
-
-`name`
- |
-
-String metric name.
- |
-
-
-`**kwargs`
- |
-
-Additional keyword arguments for backward compatibility.
-Accepted values:
-`aggregation` - When the `value` tensor provided is not the result
-of calling a `keras.Metric` instance, it will be aggregated by
-default using a `keras.Metric.Mean`.
- |
-
-
-
build
@@ -489,6 +395,29 @@ Instance of `TensorShape`, or list of instances of
)
+Builds the layer's states with the supplied config dict.
+
+By default, this method calls the `build(config["input_shape"])` method, which
+creates weights based on the layer's input shape in the supplied config. If your
+config contains other information needed to load the layer's state, you should
+override this method.
+
+
+
+
+
+Args |
+
+
+
+`config`
+ |
+
+Dict containing the input shape associated with this layer.
+ |
+
+
+
compute_mask
@@ -665,6 +594,30 @@ A layer instance.
get_build_config()
+Returns a dictionary with the layer's input shape.
+
+This method returns a config dict that can be used by
+`build_from_config(config)` to create all states (e.g. Variables and Lookup
+tables) needed by the layer.
+
+By default, the config only contains the input shape that the layer was built
+with. If you're writing a custom layer that creates state in an unusual way, you
+should override this method to make sure this state is already created when
+Keras attempts to load its value upon model loading.
+
+
+
+
+
+Returns |
+
+
+A dict containing the input shape associated with the layer.
+ |
+
+
+
+
get_config
View
@@ -726,6 +679,35 @@ Weights values as a list of NumPy arrays.
+load_own_variables
+
+
+load_own_variables(
+ store
+)
+
+
+Loads the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is loaded upon calling `keras.models.load_model()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict from which the state of the model will be loaded.
+ |
+
+
+
merge_state
@@ -826,6 +808,35 @@ A scalar tensor, or a dictionary of scalar tensors.
+save_own_variables
+
+
+save_own_variables(
+ store
+)
+
+
+Saves the state of the layer.
+
+You can override this method to take full control of how the state of the layer
+is saved upon calling `model.save()`.
+
+
+
+
+
+Args |
+
+
+
+`store`
+ |
+
+Dict where the state of the model will be saved.
+ |
+
+
+
set_weights
diff --git a/docs/api_docs/python/tfr/keras/pipeline/DatasetHparams/dataset_reader.md b/docs/api_docs/python/tfr/keras/pipeline/DatasetHparams/dataset_reader.md
index 87f5fc7..052cfe8 100644
--- a/docs/api_docs/python/tfr/keras/pipeline/DatasetHparams/dataset_reader.md
+++ b/docs/api_docs/python/tfr/keras/pipeline/DatasetHparams/dataset_reader.md
@@ -2731,7 +2731,6 @@ Optional. A name for the tf.data transformation.
-
Returns |
@@ -2788,7 +2787,6 @@ TensorShape([None])
```
-
Args |
@@ -2831,7 +2829,6 @@ their row_splits dtype changed.
-
Returns |
@@ -3567,6 +3564,20 @@ users should not set the `checkpoint` argument in `checkpoint_args`.
+
+
+
+
+Returns |
+
+
+An operation which, when executed, performs the save. When writing
+checkpoints, returns `None`. The return value is useful in unit tests.
+ |
+
+
+
+
@@ -3821,6 +3832,24 @@ list(dataset.as_numpy_iterator())
# [1, 0, 2]
```
+### Fully shuffling all the data
+
+To shuffle an entire dataset, set `buffer_size=dataset.cardinality()`. This is
+equivalent to setting the `buffer_size` equal to the number of elements in the
+dataset, resulting in a uniform shuffle.
+
+Note: `shuffle(dataset.cardinality())` loads the full dataset into memory so
+that it can be shuffled. This will cause an out-of-memory (OOM) error if the
+dataset is too large, so a full shuffle should only be used for datasets that
+are known to fit in memory, such as datasets of filenames or other small
+datasets.
+
+```python
+dataset = tf.data.Dataset.range(20)
+dataset = dataset.shuffle(dataset.cardinality())
+# [18, 4, 9, 2, 17, 8, 5, 10, 0, 6, 16, 3, 19, 7, 14, 11, 15, 13, 12, 1]
+```
+
@@ -3832,7 +3861,9 @@ list(dataset.as_numpy_iterator())
A `tf.int64` scalar `tf.Tensor`, representing the number of
-elements from this dataset from which the new dataset will sample.
+elements from this dataset from which the new dataset will sample. To
+uniformly shuffle the entire dataset, use
+`buffer_size=dataset.cardinality()`.
|
@@ -4048,7 +4079,6 @@ a snapshot.
|
-
Returns |
@@ -4096,7 +4126,6 @@ a.apply(tf.data.experimental.dense_to_sparse_batch(
```
-
Args |
@@ -4645,7 +4674,7 @@ when an option is set more than once to a non-default value
@staticmethod
zip(
- datasets, name=None
+ *args, datasets=None, name=None
)
@@ -4657,14 +4686,14 @@ structure of `Dataset` objects. The supported nesting mechanisms are documented
[here](https://www.tensorflow.org/guide/data#dataset_structure).
```
->>> # The nested structure of the `datasets` argument determines the
->>> # structure of elements in the resulting dataset.
+>>> # The datasets or nested structures of datasets passed via `*args`
+>>> # determine the structure of elements in the resulting dataset.
>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
>>> b = tf.data.Dataset.range(4, 7) # ==> [ 4, 5, 6 ]
->>> ds = tf.data.Dataset.zip((a, b))
+>>> ds = tf.data.Dataset.zip(a, b)
>>> list(ds.as_numpy_iterator())
[(1, 4), (2, 5), (3, 6)]
->>> ds = tf.data.Dataset.zip((b, a))
+>>> ds = tf.data.Dataset.zip(b, a)
>>> list(ds.as_numpy_iterator())
[(4, 1), (5, 2), (6, 3)]
>>>
@@ -4672,7 +4701,7 @@ structure of `Dataset` objects. The supported nesting mechanisms are documented
>>> c = tf.data.Dataset.range(7, 13).batch(2) # ==> [ [7, 8],
... # [9, 10],
... # [11, 12] ]
->>> ds = tf.data.Dataset.zip((a, b, c))
+>>> ds = tf.data.Dataset.zip(a, b, c)
>>> for element in ds.as_numpy_iterator():
... print(element)
(1, 4, array([7, 8]))
@@ -4682,7 +4711,7 @@ structure of `Dataset` objects. The supported nesting mechanisms are documented
>>> # The number of elements in the resulting dataset is the same as
>>> # the size of the smallest dataset in `datasets`.
>>> d = tf.data.Dataset.range(13, 15) # ==> [ 13, 14 ]
->>> ds = tf.data.Dataset.zip((a, d))
+>>> ds = tf.data.Dataset.zip(a, d)
>>> list(ds.as_numpy_iterator())
[(1, 13), (2, 14)]
```
@@ -4694,10 +4723,20 @@ structure of `Dataset` objects. The supported nesting mechanisms are documented
+`*args`
+ |
+
+Datasets or nested structures of datasets to zip together. This
+can't be set if `datasets` is set.
+ |
+
+
`datasets`
|
-A (nested) structure of datasets.
+A (nested) structure of datasets. This can't be set if `*args`
+is set. This argument exists only for backwards compatibility; using `*args`
+is preferred.
|
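+
+A short sketch of the two calling conventions described above (assuming a
+TensorFlow version recent enough to support positional `zip(*args)`); both
+forms produce the same zipped dataset:
+
+```python
+import tensorflow as tf
+
+a = tf.data.Dataset.range(1, 4)  # ==> [1, 2, 3]
+b = tf.data.Dataset.range(4, 7)  # ==> [4, 5, 6]
+
+# Preferred form: pass the datasets positionally.
+ds_new = tf.data.Dataset.zip(a, b)
+
+# Backwards-compatible form: pass a (nested) structure via `datasets`.
+ds_old = tf.data.Dataset.zip(datasets=(a, b))
+
+assert (list(ds_new.as_numpy_iterator())
+        == list(ds_old.as_numpy_iterator()))  # [(1, 4), (2, 5), (3, 6)]
+```
+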
diff --git a/docs/api_docs/python/tfr/keras/utils.md b/docs/api_docs/python/tfr/keras/utils.md
index fc0c4f2..5e0f32a 100644
--- a/docs/api_docs/python/tfr/keras/utils.md
+++ b/docs/api_docs/python/tfr/keras/utils.md
@@ -22,6 +22,8 @@ Utils for tfr.keras.
## Functions
+[`deserialize_keras_object(...)`](../../tfr/keras/utils/deserialize_keras_object.md)
+
[`identity(...)`](../../tfr/keras/utils/identity.md): Identity function that
returns the input label.
@@ -37,6 +39,8 @@ Computes whether label is greater or equal to 1.
[`pow_minus_1(...)`](../../tfr/keras/utils/pow_minus_1.md): Computes `2**x - 1`
element-wise for each label.
+[`serialize_keras_object(...)`](../../tfr/keras/utils/serialize_keras_object.md)
+
[`symmetric_log1p(...)`](../../tfr/keras/utils/symmetric_log1p.md): Computes
`sign(x) * log(1 + sign(x))`.
diff --git a/docs/api_docs/python/tfr/keras/utils/GainFunction.md b/docs/api_docs/python/tfr/keras/utils/GainFunction.md
index 601a3fb..6a57b20 100644
--- a/docs/api_docs/python/tfr/keras/utils/GainFunction.md
+++ b/docs/api_docs/python/tfr/keras/utils/GainFunction.md
@@ -6,7 +6,6 @@
# tfr.keras.utils.GainFunction
-
This symbol is a **type alias**.
#### Source:
diff --git a/docs/api_docs/python/tfr/keras/utils/deserialize_keras_object.md b/docs/api_docs/python/tfr/keras/utils/deserialize_keras_object.md
new file mode 100644
index 0000000..410d34c
--- /dev/null
+++ b/docs/api_docs/python/tfr/keras/utils/deserialize_keras_object.md
@@ -0,0 +1,28 @@
+
+
+
+
+
+# tfr.keras.utils.deserialize_keras_object
+
+
+
+
+
+
+tfr.keras.utils.deserialize_keras_object(
+ config,
+ module_objects=None,
+ custom_objects=None,
+ printable_module_name=None
+)
+
+
+
diff --git a/docs/api_docs/python/tfr/keras/utils/identity.md b/docs/api_docs/python/tfr/keras/utils/identity.md
index 38b84f4..c5f7342 100644
--- a/docs/api_docs/python/tfr/keras/utils/identity.md
+++ b/docs/api_docs/python/tfr/keras/utils/identity.md
@@ -11,7 +11,7 @@ description: Identity function that returns the input label.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/utils/inverse.md b/docs/api_docs/python/tfr/keras/utils/inverse.md
index df6a591..c489f6d 100644
--- a/docs/api_docs/python/tfr/keras/utils/inverse.md
+++ b/docs/api_docs/python/tfr/keras/utils/inverse.md
@@ -11,7 +11,7 @@ description: Computes the inverse of input rank.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/utils/is_greater_equal_1.md b/docs/api_docs/python/tfr/keras/utils/is_greater_equal_1.md
index aaca9fa..03e8aab 100644
--- a/docs/api_docs/python/tfr/keras/utils/is_greater_equal_1.md
+++ b/docs/api_docs/python/tfr/keras/utils/is_greater_equal_1.md
@@ -11,7 +11,7 @@ description: Computes whether label is greater or equal to 1.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/utils/log2_inverse.md b/docs/api_docs/python/tfr/keras/utils/log2_inverse.md
index 2d83921..1d13b21 100644
--- a/docs/api_docs/python/tfr/keras/utils/log2_inverse.md
+++ b/docs/api_docs/python/tfr/keras/utils/log2_inverse.md
@@ -11,7 +11,7 @@ description: Computes 1./log2(1+x) element-wise for each label.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/utils/pow_minus_1.md b/docs/api_docs/python/tfr/keras/utils/pow_minus_1.md
index ff94fc9..7e37be8 100644
--- a/docs/api_docs/python/tfr/keras/utils/pow_minus_1.md
+++ b/docs/api_docs/python/tfr/keras/utils/pow_minus_1.md
@@ -11,7 +11,7 @@ description: Computes 2**x - 1 element-wise for each label.
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/keras/utils/serialize_keras_object.md b/docs/api_docs/python/tfr/keras/utils/serialize_keras_object.md
new file mode 100644
index 0000000..f7f670f
--- /dev/null
+++ b/docs/api_docs/python/tfr/keras/utils/serialize_keras_object.md
@@ -0,0 +1,25 @@
+
+
+
+
+
+# tfr.keras.utils.serialize_keras_object
+
+
+
+
+
+
+tfr.keras.utils.serialize_keras_object(
+ obj
+)
+
+
+
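+
+This utility and its counterpart `deserialize_keras_object` carry no docstring
+here. Assuming they wrap the (legacy) Keras serialization helpers, which is an
+assumption rather than something this page states, a typical round trip for a
+TF-Ranking object would look roughly like this:
+
+```python
+import tensorflow_ranking as tfr
+
+metric = tfr.keras.metrics.NDCGMetric(name="ndcg_10", topn=10)
+config = tfr.keras.utils.serialize_keras_object(metric)
+
+# Rebuild an equivalent object from the config; with the legacy helpers a
+# custom class usually has to be supplied via `custom_objects`.
+restored = tfr.keras.utils.deserialize_keras_object(
+    config, custom_objects={"NDCGMetric": tfr.keras.metrics.NDCGMetric})
+print(type(restored).__name__)  # Expected: NDCGMetric
+```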
diff --git a/docs/api_docs/python/tfr/keras/utils/symmetric_log1p.md b/docs/api_docs/python/tfr/keras/utils/symmetric_log1p.md
index 8b584e5..7f4dede 100644
--- a/docs/api_docs/python/tfr/keras/utils/symmetric_log1p.md
+++ b/docs/api_docs/python/tfr/keras/utils/symmetric_log1p.md
@@ -11,7 +11,7 @@ description: Computes sign(x) * log(1 + sign(x)).
-
+
View source on GitHub
diff --git a/docs/api_docs/python/tfr/utils/LossFunction.md b/docs/api_docs/python/tfr/utils/LossFunction.md
index 257807a..701fa49 100644
--- a/docs/api_docs/python/tfr/utils/LossFunction.md
+++ b/docs/api_docs/python/tfr/utils/LossFunction.md
@@ -6,7 +6,6 @@
# tfr.utils.LossFunction
-
This symbol is a **type alias**.
#### Source:
diff --git a/docs/api_docs/python/tfr/utils/parse_keys_and_weights.md b/docs/api_docs/python/tfr/utils/parse_keys_and_weights.md
index e13b4b2..35c067a 100644
--- a/docs/api_docs/python/tfr/utils/parse_keys_and_weights.md
+++ b/docs/api_docs/python/tfr/utils/parse_keys_and_weights.md
@@ -32,7 +32,6 @@ This parse function will remove all spaces. Different keys are split by "," and
then weight associated with key is split by ":".
-
Args |
@@ -50,7 +49,6 @@ and weighted by the weights split by ":". For example, key =
-
| | | | | | |