diff --git a/README.md b/README.md index 4df302891e..55a0d8853d 100644 --- a/README.md +++ b/README.md @@ -34,10 +34,10 @@ __Layer 1: Statistical Building Blocks__ __Layer 2: Model Building__ -* Edward2 ([`tfp.edward2`](https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2)): +* Edward2 ([`tfp.edward2`](https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/experimental/edward2)): A probabilistic programming language for specifying flexible probabilistic models as programs. See the - [Edward2 `README.md`](https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2/README.md). + [Edward2 `README.md`](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/experimental/edward2/README.md). * Probabilistic Layers ([`tfp.layers`](https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/layers)): Neural network layers with uncertainty over the functions they represent, extending TensorFlow Layers. diff --git a/setup.py b/setup.py index 7b7aa83563..145516fa9c 100644 --- a/setup.py +++ b/setup.py @@ -32,6 +32,7 @@ 'numpy >= 1.13.3', 'decorator', 'cloudpickle == 1.1.1', + 'gast >= 0.2, < 0.3' # For autobatching ] if '--release' in sys.argv: diff --git a/tensorflow_probability/python/__init__.py b/tensorflow_probability/python/__init__.py index a0b4042e8a..5d3e64ead2 100644 --- a/tensorflow_probability/python/__init__.py +++ b/tensorflow_probability/python/__init__.py @@ -21,7 +21,6 @@ from tensorflow_probability.python import bijectors from tensorflow_probability.python import debugging from tensorflow_probability.python import distributions -from tensorflow_probability.python import edward2 from tensorflow_probability.python import experimental from tensorflow_probability.python import glm from tensorflow_probability.python import layers @@ -29,15 +28,21 @@ from tensorflow_probability.python import mcmc from tensorflow_probability.python import monte_carlo from tensorflow_probability.python import optimizer -from tensorflow_probability.python import positive_semidefinite_kernels from tensorflow_probability.python import stats from tensorflow_probability.python import sts from tensorflow_probability.python import trainable_distributions from tensorflow_probability.python import util from tensorflow_probability.python import vi +from tensorflow.python.util import lazy_loader # pylint: disable=g-direct-tensorflow-import from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import +edward2 = lazy_loader.LazyLoader('edward2', globals(), + 'tensorflow_probability.python.edward2') +positive_semidefinite_kernels = lazy_loader.LazyLoader( + 'positive_semidefinite_kernels', globals(), + 'tensorflow_probability.python.positive_semidefinite_kernels') + _allowed_symbols = [ 'bijectors', 'debugging', diff --git a/tensorflow_probability/python/bijectors/BUILD b/tensorflow_probability/python/bijectors/BUILD index 226aa61d1b..954d65141d 100644 --- a/tensorflow_probability/python/bijectors/BUILD +++ b/tensorflow_probability/python/bijectors/BUILD @@ -121,6 +121,7 @@ py_library( "//tensorflow_probability/python/internal:hypothesis_testlib", "//tensorflow_probability/python/internal:tensor_util", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -132,7 +133,10 @@ py_test( main = "bijector_properties_test.py", shard_count = 5, tags = 
["hypothesis"], - deps = [":bijector_properties_testlib"], + deps = [ + ":bijector_properties_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) py_test( @@ -143,7 +147,10 @@ py_test( main = "bijector_properties_test.py", shard_count = 8, tags = ["hypothesis"], - deps = [":bijector_properties_testlib"], + deps = [ + ":bijector_properties_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) test_suite( @@ -306,7 +313,9 @@ py_library( py_library( name = "exp", srcs = ["exp.py"], - deps = [":power_transform"], + deps = [ + ":power_transform", + ], ) py_library( @@ -365,7 +374,9 @@ py_library( py_library( name = "invert", srcs = ["invert.py"], - deps = [":bijector"], + deps = [ + ":bijector", + ], ) py_library( @@ -734,6 +745,7 @@ py_test( deps = [ ":bijectors", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -747,6 +759,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -758,6 +771,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -787,6 +801,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/distributions", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -801,6 +816,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -827,6 +843,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -838,6 +855,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -849,6 +867,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -862,6 +881,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/distributions:lkj", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -874,6 +894,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -888,6 +909,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -900,6 +922,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -913,6 +936,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -925,6 +949,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -938,6 +963,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -949,6 +975,7 @@ py_test( ":bijector_test_util", ":bijectors", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -961,6 +988,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + 
"//tensorflow_probability/python/internal:test_case", ], ) @@ -974,6 +1002,7 @@ py_test( # tensorflow dep, "//tensorflow_probability/python/distributions", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1005,6 +1034,7 @@ py_test( # tensorflow dep, "//tensorflow_probability/python/distributions", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1018,6 +1048,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1031,6 +1062,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1043,6 +1075,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1055,6 +1088,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1067,6 +1101,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1079,6 +1114,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1092,6 +1128,7 @@ py_test( # tensorflow dep, "//tensorflow_probability/python/distributions", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1122,6 +1159,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1134,6 +1172,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1145,6 +1184,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1158,6 +1198,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1171,6 +1212,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1184,6 +1226,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1197,6 +1240,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1209,6 +1253,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1221,6 +1266,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1234,6 +1280,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/math:gradient", ], ) @@ -1247,6 +1294,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", 
"//tensorflow_probability/python/internal:test_util", ], ) @@ -1260,6 +1308,7 @@ py_test( ":bijectors", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1276,6 +1325,7 @@ py_test( # tensorflow dep, "//tensorflow_probability/python/internal:hypothesis_testlib", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1290,6 +1340,7 @@ py_test( # tensorflow dep, "//tensorflow_probability/python/distributions", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1303,6 +1354,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) diff --git a/tensorflow_probability/python/bijectors/absolute_value_test.py b/tensorflow_probability/python/bijectors/absolute_value_test.py index b7ac641b11..d2650a08d8 100644 --- a/tensorflow_probability/python/bijectors/absolute_value_test.py +++ b/tensorflow_probability/python/bijectors/absolute_value_test.py @@ -21,11 +21,13 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class AbsoluteValueTest(tf.test.TestCase): +class AbsoluteValueTest(test_case.TestCase): """Tests correctness of the absolute value bijector.""" def testBijectorVersusNumpyRewriteOfBasicFunctionsEventNdims0(self): diff --git a/tensorflow_probability/python/bijectors/affine_linear_operator_test.py b/tensorflow_probability/python/bijectors/affine_linear_operator_test.py index d4b9da5a20..180925e7e7 100644 --- a/tensorflow_probability/python/bijectors/affine_linear_operator_test.py +++ b/tensorflow_probability/python/bijectors/affine_linear_operator_test.py @@ -24,12 +24,13 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class AffineLinearOperatorTest(tf.test.TestCase): +class AffineLinearOperatorTest(test_case.TestCase): def testIdentity(self): affine = tfb.AffineLinearOperator(validate_args=True) diff --git a/tensorflow_probability/python/bijectors/affine_test.py b/tensorflow_probability/python/bijectors/affine_test.py index 31d042eedf..8a7cb02487 100644 --- a/tensorflow_probability/python/bijectors/affine_test.py +++ b/tensorflow_probability/python/bijectors/affine_test.py @@ -22,14 +22,16 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class AffineBijectorTest(tf.test.TestCase): +class AffineBijectorTest(test_case.TestCase): """Tests correctness of the Y = scale @ x 
+ shift transformation.""" def testProperties(self): @@ -44,7 +46,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -73,7 +75,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -112,9 +114,9 @@ def testNoBatchMultivariateFullDynamic(self): mu_value = np.array([1., -1], dtype=np.float32) scale_diag_value = np.array([2., 2], dtype=np.float32) - x = tf.compat.v1.placeholder_with_default(x_value, shape=None) - mu = tf.compat.v1.placeholder_with_default(mu_value, shape=None) - scale_diag = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default(x_value, shape=None) + mu = tf1.placeholder_with_default(mu_value, shape=None) + scale_diag = tf1.placeholder_with_default( scale_diag_value, shape=None) bijector = tfb.Affine(shift=mu, scale_diag=scale_diag) @@ -130,7 +132,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -151,7 +153,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -171,9 +173,9 @@ def testBatchMultivariateFullDynamic(self): mu_value = np.array([[1., -1]], dtype=np.float32) scale_diag_value = np.array([[2., 2]], dtype=np.float32) - x = tf.compat.v1.placeholder_with_default(x_value, shape=None) - mu = tf.compat.v1.placeholder_with_default(mu_value, shape=None) - scale_diag = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default(x_value, shape=None) + mu = tf1.placeholder_with_default(mu_value, shape=None) + scale_diag = tf1.placeholder_with_default( scale_diag_value, shape=None) bijector = tfb.Affine(shift=mu, scale_diag=scale_diag) @@ -190,7 +192,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -211,7 +213,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -234,7 +236,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, 
**kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -255,7 +257,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -279,7 +281,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -313,7 +315,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -346,7 +348,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -380,7 +382,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -575,7 +577,7 @@ def static_run(fun, x, **kwargs): def dynamic_run(fun, x_value, **kwargs): x_value = np.array(x_value, dtype=np.float32) - placeholder = tf.compat.v1.placeholder_with_default(x_value, shape=None) + placeholder = tf1.placeholder_with_default(x_value, shape=None) return self.evaluate(fun(placeholder, **kwargs)) for run in (static_run, dynamic_run): @@ -611,9 +613,9 @@ def dynamic_run(fun, x_value, **kwargs): def _testScaledIdentityComplexAdjoint(self, is_dynamic): shift_ = np.array(-0.5, dtype=np.complex) scale_ = np.array(4 + 2j, dtype=np.complex) - shift = tf.compat.v1.placeholder_with_default( + shift = tf1.placeholder_with_default( shift_, shape=None if is_dynamic else []) - scale = tf.compat.v1.placeholder_with_default( + scale = tf1.placeholder_with_default( scale_, shape=None if is_dynamic else []) bijector = tfb.Affine( shift=shift, diff --git a/tensorflow_probability/python/bijectors/batch_normalization_test.py b/tensorflow_probability/python/bijectors/batch_normalization_test.py index b9f52a13bb..3b5d30163f 100644 --- a/tensorflow_probability/python/bijectors/batch_normalization_test.py +++ b/tensorflow_probability/python/bijectors/batch_normalization_test.py @@ -19,22 +19,24 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from 
tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python import distributions from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util + from tensorflow.python.framework import test_util as tf_test_util # pylint: disable=g-direct-tensorflow-import @tf_test_util.run_all_in_graph_and_eager_modes class BatchNormTest(test_util.VectorDistributionTestHelpers, parameterized.TestCase, - tf.test.TestCase): + test_case.TestCase): def _reduction_axes(self, input_shape, event_dims): if isinstance(event_dims, int): diff --git a/tensorflow_probability/python/bijectors/bijector_properties_test.py b/tensorflow_probability/python/bijectors/bijector_properties_test.py index f252807988..d281ccc042 100644 --- a/tensorflow_probability/python/bijectors/bijector_properties_test.py +++ b/tensorflow_probability/python/bijectors/bijector_properties_test.py @@ -23,15 +23,14 @@ import hypothesis as hp from hypothesis import strategies as hps import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp +from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import tensor_util from tensorflow_probability.python.internal import tensorshape_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python.internal import test_case -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import flags.DEFINE_enum('tf_mode', 'graph', ['eager', 'graph'], 'TF execution mode to use') @@ -309,7 +308,7 @@ def assert_no_none_grad(bijector, method, wrt_vars, grads): @test_util.run_all_in_graph_and_eager_modes -class BijectorPropertiesTest(tf.test.TestCase, parameterized.TestCase): +class BijectorPropertiesTest(test_case.TestCase, parameterized.TestCase): @parameterized.named_parameters( {'testcase_name': bname, 'bijector_name': bname} diff --git a/tensorflow_probability/python/bijectors/bijector_test.py b/tensorflow_probability/python/bijectors/bijector_test.py index f0539d2b66..0072300b1d 100644 --- a/tensorflow_probability/python/bijectors/bijector_test.py +++ b/tensorflow_probability/python/bijectors/bijector_test.py @@ -428,7 +428,7 @@ def _forward_log_det_jacobian(self, _, arg1, arg2): # Test that ensures kwargs from public methods are passed in to # private methods. 
@test_util.run_all_in_graph_and_eager_modes -class ConditionalBijectorTest(tf.test.TestCase): +class ConditionalBijectorTest(test_case.TestCase): def testConditionalBijector(self): b = _ConditionalBijector() diff --git a/tensorflow_probability/python/bijectors/blockwise_test.py b/tensorflow_probability/python/bijectors/blockwise_test.py index 26b5d0acf4..f89d9cc22f 100644 --- a/tensorflow_probability/python/bijectors/blockwise_test.py +++ b/tensorflow_probability/python/bijectors/blockwise_test.py @@ -19,23 +19,25 @@ from __future__ import print_function # Dependency imports -from absl.testing import parameterized -import tensorflow as tf +from absl.testing import parameterized +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class BlockwiseBijectorTest(tf.test.TestCase, parameterized.TestCase): +class BlockwiseBijectorTest(test_case.TestCase, parameterized.TestCase): @parameterized.parameters((False, []), (True, []), (False, [2]), (True, [2])) def testExplicitBlocks(self, dynamic_shape, batch_shape): block_sizes = tf.convert_to_tensor(value=[2, 1, 3]) - block_sizes = tf.compat.v1.placeholder_with_default( + block_sizes = tf1.placeholder_with_default( block_sizes, shape=None if dynamic_shape else block_sizes.shape) exp = tfb.Exp() sp = tfb.Softplus() @@ -46,7 +48,7 @@ def testExplicitBlocks(self, dynamic_shape, batch_shape): for s in batch_shape: x = tf.expand_dims(x, 0) x = tf.tile(x, [s] + [1] * (tensorshape_util.rank(x.shape) - 1)) - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( x, shape=None if dynamic_shape else x.shape) # Identity to break the caching. @@ -105,7 +107,7 @@ def testBijectiveAndFinite(self): blockwise = tfb.Blockwise(bijectors=[exp, sp, aff], block_sizes=[2, 1, 3]) x = tf.cast([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=tf.float32) - x = tf.compat.v1.placeholder_with_default(x, shape=x.shape) + x = tf1.placeholder_with_default(x, shape=x.shape) # Identity to break the caching. 
blockwise_y = tf.identity(blockwise.forward(x)) @@ -161,13 +163,13 @@ def testRaisesBadBlocks(self): def testRaisesBadBlocksDynamic(self): if tf.executing_eagerly(): return with self.assertRaises(tf.errors.InvalidArgumentError): - block_sizes = tf.compat.v1.placeholder_with_default([1, 2], shape=None) + block_sizes = tf1.placeholder_with_default([1, 2], shape=None) blockwise = tfb.Blockwise( bijectors=[tfb.Exp()], block_sizes=block_sizes, validate_args=True) self.evaluate(blockwise.block_sizes) with self.assertRaises(tf.errors.InvalidArgumentError): - block_sizes = tf.compat.v1.placeholder_with_default([[1]], shape=None) + block_sizes = tf1.placeholder_with_default([[1]], shape=None) blockwise = tfb.Blockwise( bijectors=[tfb.Exp()], block_sizes=block_sizes, validate_args=True) self.evaluate(blockwise.block_sizes) diff --git a/tensorflow_probability/python/bijectors/chain_test.py b/tensorflow_probability/python/bijectors/chain_test.py index c4b5cbd543..db7b92ed69 100644 --- a/tensorflow_probability/python/bijectors/chain_test.py +++ b/tensorflow_probability/python/bijectors/chain_test.py @@ -19,12 +19,15 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -39,7 +42,7 @@ def __init__(self, forward_min_event_ndims=0, inverse_min_event_ndims=3): @test_util.run_all_in_graph_and_eager_modes -class ChainBijectorTest(tf.test.TestCase): +class ChainBijectorTest(test_case.TestCase): """Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation.""" def testBijector(self): @@ -186,7 +189,7 @@ def testChainAffineExp(self): def testChainIldjWithPlaceholder(self): chain = tfb.Chain((tfb.Exp(), tfb.Exp())) - samples = tf.compat.v1.placeholder_with_default( + samples = tf1.placeholder_with_default( np.zeros([2, 10], np.float32), shape=None) ildj = chain.inverse_log_det_jacobian(samples, event_ndims=0) self.assertTrue(ildj is not None) @@ -197,7 +200,7 @@ def testChainDynamicToStatic(self): return def xform_dynamic(x): - return tf.compat.v1.placeholder_with_default(x, shape=None) + return tf1.placeholder_with_default(x, shape=None) def xform_static(x): tensorshape_util.set_shape(x, [1]) diff --git a/tensorflow_probability/python/bijectors/cholesky_outer_product_test.py b/tensorflow_probability/python/bijectors/cholesky_outer_product_test.py index 5521c52749..e01b612dc5 100644 --- a/tensorflow_probability/python/bijectors/cholesky_outer_product_test.py +++ b/tensorflow_probability/python/bijectors/cholesky_outer_product_test.py @@ -23,11 +23,12 @@ import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class CholeskyOuterProductBijectorTest(tf.test.TestCase): +class CholeskyOuterProductBijectorTest(test_case.TestCase): """Tests the correctness of the Y = X @ X.T transformation.""" def testBijectorMatrix(self): 
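
Reviewer note on the mechanical rewrite that dominates the hunks above: `import tensorflow.compat.v1 as tf1` binds the same module as `tf.compat.v1`, so `tf1.placeholder_with_default` is the identical symbol with a shorter call site. These tests lean on it to exercise both static- and dynamic-shape code paths under `run_all_in_graph_and_eager_modes`. A minimal standalone sketch of that pattern, using only public TF symbols (the variable names here are illustrative, not from this diff):

```python
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf

x_value = np.array([1., 2., 3.], dtype=np.float32)

# Static path: the shape [3] is known at graph-construction time.
x_static = tf.convert_to_tensor(x_value)

# Dynamic path: `shape=None` hides the static shape, so code under test
# must tolerate unknown shapes. In graph mode x_dynamic has an unknown
# static shape; in eager mode the default value flows through directly,
# which is why the same test body runs under both modes.
x_dynamic = tf1.placeholder_with_default(x_value, shape=None)
```
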
diff --git a/tensorflow_probability/python/bijectors/cholesky_to_inv_cholesky_test.py b/tensorflow_probability/python/bijectors/cholesky_to_inv_cholesky_test.py index fcf6e96181..ea9aa41139 100644 --- a/tensorflow_probability/python/bijectors/cholesky_to_inv_cholesky_test.py +++ b/tensorflow_probability/python/bijectors/cholesky_to_inv_cholesky_test.py @@ -22,12 +22,12 @@ import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class CholeskyToInvCholeskyTest(tf.test.TestCase): +class CholeskyToInvCholeskyTest(test_case.TestCase): def testBijector(self): bijector = tfb.CholeskyToInvCholesky() diff --git a/tensorflow_probability/python/bijectors/correlation_cholesky_test.py b/tensorflow_probability/python/bijectors/correlation_cholesky_test.py index 2ebcad34cd..9953d53145 100644 --- a/tensorflow_probability/python/bijectors/correlation_cholesky_test.py +++ b/tensorflow_probability/python/bijectors/correlation_cholesky_test.py @@ -21,20 +21,23 @@ import itertools # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v2 as tf - from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.distributions import lkj from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class CorrelationCholeskyBijectorTest(parameterized.TestCase, tf.test.TestCase): +class CorrelationCholeskyBijectorTest(parameterized.TestCase, + test_case.TestCase): """Tests the correctness of the CorrelationCholesky bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/cumsum_test.py b/tensorflow_probability/python/bijectors/cumsum_test.py index 6f8af630b6..36904cc95e 100644 --- a/tensorflow_probability/python/bijectors/cumsum_test.py +++ b/tensorflow_probability/python/bijectors/cumsum_test.py @@ -19,18 +19,20 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class _CumsumBijectorTest(tf.test.TestCase): +class _CumsumBijectorTest(test_case.TestCase): """Tests correctness of the cumsum bijector.""" def testInvalidAxis(self): diff --git a/tensorflow_probability/python/bijectors/discrete_cosine_transform_test.py b/tensorflow_probability/python/bijectors/discrete_cosine_transform_test.py index ca67d9a51e..a9ea547376 100644 --- a/tensorflow_probability/python/bijectors/discrete_cosine_transform_test.py +++ 
b/tensorflow_probability/python/bijectors/discrete_cosine_transform_test.py @@ -19,17 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import fftpack import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class DiscreteCosineTransformTest(tf.test.TestCase): +class DiscreteCosineTransformTest(test_case.TestCase): """Tests correctness of the DiscreteCosineTransform bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/exp_test.py b/tensorflow_probability/python/bijectors/exp_test.py index 5e218cea99..3724d4e5c7 100644 --- a/tensorflow_probability/python/bijectors/exp_test.py +++ b/tensorflow_probability/python/bijectors/exp_test.py @@ -19,17 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class ExpBijectorTest(tf.test.TestCase): +class ExpBijectorTest(test_case.TestCase): """Tests correctness of the Y = g(X) = exp(X) transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/expm1_test.py b/tensorflow_probability/python/bijectors/expm1_test.py index 54e5ce1c5f..c0ce160cad 100644 --- a/tensorflow_probability/python/bijectors/expm1_test.py +++ b/tensorflow_probability/python/bijectors/expm1_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class Expm1BijectorTest(tf.test.TestCase): +class Expm1BijectorTest(test_case.TestCase): """Tests correctness of the Y = g(X) = expm1(X) transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/fill_triangular_test.py b/tensorflow_probability/python/bijectors/fill_triangular_test.py index b2375da2bd..e5be16cd37 100644 --- a/tensorflow_probability/python/bijectors/fill_triangular_test.py +++ b/tensorflow_probability/python/bijectors/fill_triangular_test.py @@ -25,11 +25,12 @@ from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class FillTriangularBijectorTest(tf.test.TestCase): +class FillTriangularBijectorTest(test_case.TestCase): """Tests the correctness of 
the FillTriangular bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/gumbel_test.py b/tensorflow_probability/python/bijectors/gumbel_test.py index 76236fdf49..a06459b221 100644 --- a/tensorflow_probability/python/bijectors/gumbel_test.py +++ b/tensorflow_probability/python/bijectors/gumbel_test.py @@ -19,19 +19,20 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats - import tensorflow.compat.v2 as tf - from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class GumbelTest(tf.test.TestCase): +class GumbelTest(test_case.TestCase): """Tests correctness of the Gumbel bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/identity_test.py b/tensorflow_probability/python/bijectors/identity_test.py index 157e22bbb8..5efa7e75e8 100644 --- a/tensorflow_probability/python/bijectors/identity_test.py +++ b/tensorflow_probability/python/bijectors/identity_test.py @@ -19,15 +19,17 @@ from __future__ import print_function # Dependency imports -import tensorflow.compat.v2 as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class IdentityTest(tf.test.TestCase): +class IdentityTest(test_case.TestCase): """Tests correctness of the Y = g(X) = X transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/inline_test.py b/tensorflow_probability/python/bijectors/inline_test.py index 0cd8a7d76f..6fccb6edc4 100644 --- a/tensorflow_probability/python/bijectors/inline_test.py +++ b/tensorflow_probability/python/bijectors/inline_test.py @@ -25,11 +25,12 @@ from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class InlineBijectorTest(tf.test.TestCase): +class InlineBijectorTest(test_case.TestCase): """Tests correctness of the inline constructed bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/invert_test.py b/tensorflow_probability/python/bijectors/invert_test.py index 7cced65964..b30c12c3d9 100644 --- a/tensorflow_probability/python/bijectors/invert_test.py +++ b/tensorflow_probability/python/bijectors/invert_test.py @@ -21,15 +21,16 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd - from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal 
import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class InvertBijectorTest(tf.test.TestCase): +class InvertBijectorTest(test_case.TestCase): """Tests the correctness of the Y = Invert(bij) transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/iterated_sigmoid_centered_test.py b/tensorflow_probability/python/bijectors/iterated_sigmoid_centered_test.py index 3f5e360f74..845796d1d1 100644 --- a/tensorflow_probability/python/bijectors/iterated_sigmoid_centered_test.py +++ b/tensorflow_probability/python/bijectors/iterated_sigmoid_centered_test.py @@ -19,15 +19,17 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow_probability.python.math.gradient import batch_jacobian + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -127,13 +129,13 @@ def testJacobianConsistent(self): class IteratedSigmoidCenteredBijectorTestFloat32( - tf.test.TestCase, + test_case.TestCase, _IteratedSigmoidCenteredBijectorTest): dtype = np.float32 class IteratedSigmoidCenteredBijectorTestFloat64( - tf.test.TestCase, + test_case.TestCase, _IteratedSigmoidCenteredBijectorTest): dtype = np.float64 diff --git a/tensorflow_probability/python/bijectors/masked_autoregressive_test.py b/tensorflow_probability/python/bijectors/masked_autoregressive_test.py index 2877f03fd8..aa9a59dcf6 100644 --- a/tensorflow_probability/python/bijectors/masked_autoregressive_test.py +++ b/tensorflow_probability/python/bijectors/masked_autoregressive_test.py @@ -19,20 +19,23 @@ from __future__ import print_function # Dependency imports + import numpy as np import six import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd - from tensorflow_probability.python.bijectors.masked_autoregressive import _gen_mask from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + tfk = tf.keras + tfkl = tf.keras.layers @@ -87,7 +90,7 @@ def _bijector_fn(x): @test_util.run_all_in_graph_and_eager_modes -class GenMaskTest(tf.test.TestCase): +class GenMaskTest(test_case.TestCase): def test346Exclusive(self): expected_mask = np.array( @@ -114,7 +117,7 @@ def test346Inclusive(self): @test_util.run_all_in_graph_and_eager_modes class MaskedAutoregressiveFlowTest(tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): event_shape = [4] @@ -375,7 +378,7 @@ def _autoregressive_flow_kwargs(self): @test_util.run_all_in_graph_and_eager_modes -class AutoregressiveNetworkTest(tf.test.TestCase): +class AutoregressiveNetworkTest(test_case.TestCase): def _count_trainable_params(self, layer): ret = 0 
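
For context on the `tf.test.TestCase` -> `test_case.TestCase` swap applied to every test class in this diff: the only property the patch relies on is that the TFP-internal base class is a drop-in replacement. A rough sketch of the shape such a shared base class typically takes (hypothetical body; the real `tensorflow_probability/python/internal/test_case.py` may carry different or additional helpers):

```python
import numpy as np
import tensorflow.compat.v2 as tf

class TestCase(tf.test.TestCase):
  """Project-wide test base class.

  Subclassing tf.test.TestCase keeps every existing assertion
  (self.evaluate, self.assertAllClose, ...) working unchanged, while
  giving the project one place to hang shared helpers.
  """

  def assertAllFinite(self, a):
    # Illustrative helper: fail if any evaluated entry is NaN or infinite.
    a_ = self.evaluate(tf.convert_to_tensor(a))
    self.assertTrue(np.all(np.isfinite(a_)))
```

Because the new class subclasses the old one, the change is behavior-preserving for the tests above; the BUILD edits merely add the matching `:test_case` dependency to each `py_test` and `py_library` target that now imports it.
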
diff --git a/tensorflow_probability/python/bijectors/matrix_inverse_tril_test.py b/tensorflow_probability/python/bijectors/matrix_inverse_tril_test.py index 0986c3429d..94bc118805 100644 --- a/tensorflow_probability/python/bijectors/matrix_inverse_tril_test.py +++ b/tensorflow_probability/python/bijectors/matrix_inverse_tril_test.py @@ -19,16 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class MatrixInverseTriLBijectorTest(tf.test.TestCase): +class MatrixInverseTriLBijectorTest(test_case.TestCase): """Tests the correctness of the Y = inv(tril) transformation.""" # The inverse of 0 is undefined, as the numbers above the main diff --git a/tensorflow_probability/python/bijectors/matveclu_test.py b/tensorflow_probability/python/bijectors/matveclu_test.py index 34b3d6468c..3d135b007a 100644 --- a/tensorflow_probability/python/bijectors/matveclu_test.py +++ b/tensorflow_probability/python/bijectors/matveclu_test.py @@ -19,11 +19,12 @@ from __future__ import print_function # Dependency imports -import numpy as np +import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -51,7 +52,7 @@ def trainable_lu_factorization( @test_util.run_all_in_graph_and_eager_modes -class MatvecLUTest(tf.test.TestCase): +class MatvecLUTest(test_case.TestCase): def test_invertible_from_trainable_lu_factorization(self): channels = 3 diff --git a/tensorflow_probability/python/bijectors/normal_cdf_test.py b/tensorflow_probability/python/bijectors/normal_cdf_test.py index d3bc0941b4..f3971f04ea 100644 --- a/tensorflow_probability/python/bijectors/normal_cdf_test.py +++ b/tensorflow_probability/python/bijectors/normal_cdf_test.py @@ -19,17 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class NormalCDFBijectorTest(tf.test.TestCase): +class NormalCDFBijectorTest(test_case.TestCase): """Tests correctness of the NormalCDF bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/ordered_test.py b/tensorflow_probability/python/bijectors/ordered_test.py index 2a2b419428..5f5a279475 100644 --- a/tensorflow_probability/python/bijectors/ordered_test.py +++ b/tensorflow_probability/python/bijectors/ordered_test.py @@ -19,22 +19,20 @@ from __future__ import print_function # Dependency imports -import numpy as np +import numpy as np import 
tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top +from tensorflow_probability.python.internal import test_case -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class OrderedBijectorTest(tf.test.TestCase): +class OrderedBijectorTest(test_case.TestCase): """Tests correctness of the ordered transformation.""" def setUp(self): diff --git a/tensorflow_probability/python/bijectors/permute_test.py b/tensorflow_probability/python/bijectors/permute_test.py index 3ac0a06262..7fe364dcee 100644 --- a/tensorflow_probability/python/bijectors/permute_test.py +++ b/tensorflow_probability/python/bijectors/permute_test.py @@ -19,16 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class PermuteBijectorTest(tf.test.TestCase): +class PermuteBijectorTest(test_case.TestCase): """Tests correctness of the Permute bijector.""" def assertRaisesError(self, msg): @@ -42,7 +45,7 @@ def testBijector(self): expected_x = np.random.randn(4, 2, 3) expected_y = expected_x[..., expected_permutation] - permutation_ph = tf.compat.v1.placeholder_with_default( + permutation_ph = tf1.placeholder_with_default( expected_permutation, shape=None) bijector = tfb.Permute(permutation=permutation_ph, validate_args=True) [ @@ -67,7 +70,7 @@ def testBijector(self): def testRaisesOpError(self): with self.assertRaisesError("Permutation over `d` must contain"): - permutation = tf.compat.v1.placeholder_with_default([1, 2], shape=None) + permutation = tf1.placeholder_with_default([1, 2], shape=None) bijector = tfb.Permute(permutation=permutation, validate_args=True) self.evaluate(bijector.inverse([1.])) diff --git a/tensorflow_probability/python/bijectors/power_transform_test.py b/tensorflow_probability/python/bijectors/power_transform_test.py index 84718ebcb1..90dcaf54b6 100644 --- a/tensorflow_probability/python/bijectors/power_transform_test.py +++ b/tensorflow_probability/python/bijectors/power_transform_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class PowerTransformBijectorTest(tf.test.TestCase): +class PowerTransformBijectorTest(test_case.TestCase): """Tests correctness of the power 
transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/real_nvp_test.py b/tensorflow_probability/python/bijectors/real_nvp_test.py index cf4db5bbc6..967bc9423c 100644 --- a/tensorflow_probability/python/bijectors/real_nvp_test.py +++ b/tensorflow_probability/python/bijectors/real_nvp_test.py @@ -26,13 +26,13 @@ from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class RealNVPTest(tf.test.TestCase): +class RealNVPTest(test_case.TestCase): def testBijectorWithTrivialTransform(self): flat_x_ = np.random.normal(0., 1., 8).astype(np.float32) @@ -141,7 +141,7 @@ def bijector_fn(*args, **kwargs): @test_util.run_all_in_graph_and_eager_modes class RealNVPTestKwargs( tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): @property def _real_nvp_kwargs(self): diff --git a/tensorflow_probability/python/bijectors/reciprocal_test.py b/tensorflow_probability/python/bijectors/reciprocal_test.py index d1cd61905c..7d0fda932f 100644 --- a/tensorflow_probability/python/bijectors/reciprocal_test.py +++ b/tensorflow_probability/python/bijectors/reciprocal_test.py @@ -19,18 +19,19 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v2 as tf - from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class ReciprocalTest(tf.test.TestCase, parameterized.TestCase): +class ReciprocalTest(test_case.TestCase, parameterized.TestCase): """Tests correctness of the `b(x) = 1 / x` bijector.""" @parameterized.named_parameters( diff --git a/tensorflow_probability/python/bijectors/reshape_test.py b/tensorflow_probability/python/bijectors/reshape_test.py index 634d2afa62..b18701a091 100644 --- a/tensorflow_probability/python/bijectors/reshape_test.py +++ b/tensorflow_probability/python/bijectors/reshape_test.py @@ -19,11 +19,14 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -218,7 +221,7 @@ def build_shapes(self, *args, **kwargs): @test_util.run_all_in_graph_and_eager_modes -class ReshapeBijectorTestStatic(tf.test.TestCase, _ReshapeBijectorTest): +class ReshapeBijectorTestStatic(test_case.TestCase, _ReshapeBijectorTest): def build_shapes(self, shape_in, shape_out): return shape_in, shape_out @@ -313,15 +316,15 @@ def testInputOutputMismatchOpError(self): "(Input to reshape|Cannot reshape a tensor with|cannot reshape 
array)") -class ReshapeBijectorTestDynamic(tf.test.TestCase, _ReshapeBijectorTest): +class ReshapeBijectorTestDynamic(test_case.TestCase, _ReshapeBijectorTest): def build_shapes(self, shape_in, shape_out): shape_in = np.array(shape_in, np.int32) shape_out = np.array(shape_out, np.int32) return ( - tf.compat.v1.placeholder_with_default( + tf1.placeholder_with_default( shape_in, shape=[len(shape_in)]), - tf.compat.v1.placeholder_with_default( + tf1.placeholder_with_default( shape_out, shape=[len(shape_out)]), ) @@ -382,7 +385,7 @@ def testInvalidDimensionsOpError(self): def testUnknownShapeRank(self): if tf.executing_eagerly(): return - unknown_shape = tf.compat.v1.placeholder_with_default([2, 2], shape=None) + unknown_shape = tf1.placeholder_with_default([2, 2], shape=None) known_shape = [2, 2] with self.assertRaisesRegexp(NotImplementedError, diff --git a/tensorflow_probability/python/bijectors/scale_tril_test.py b/tensorflow_probability/python/bijectors/scale_tril_test.py index ee680bcd2f..97557c9de1 100644 --- a/tensorflow_probability/python/bijectors/scale_tril_test.py +++ b/tensorflow_probability/python/bijectors/scale_tril_test.py @@ -22,11 +22,12 @@ import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class ScaleTriLBijectorTest(tf.test.TestCase): +class ScaleTriLBijectorTest(test_case.TestCase): """Tests the correctness of the ScaleTriL bijector.""" def testComputesCorrectValues(self): diff --git a/tensorflow_probability/python/bijectors/sigmoid_test.py b/tensorflow_probability/python/bijectors/sigmoid_test.py index f72f940275..c2165415dd 100644 --- a/tensorflow_probability/python/bijectors/sigmoid_test.py +++ b/tensorflow_probability/python/bijectors/sigmoid_test.py @@ -19,17 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import special import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SigmoidBijectorTest(tf.test.TestCase): +class SigmoidBijectorTest(test_case.TestCase): """Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/sinh_arcsinh_test.py b/tensorflow_probability/python/bijectors/sinh_arcsinh_test.py index 7acae26522..bf2a6196cb 100644 --- a/tensorflow_probability/python/bijectors/sinh_arcsinh_test.py +++ b/tensorflow_probability/python/bijectors/sinh_arcsinh_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SinhArcsinhTest(tf.test.TestCase): +class 
SinhArcsinhTest(test_case.TestCase): """Tests correctness of the power transformation.""" def testBijectorVersusNumpyRewriteOfBasicFunctions(self): diff --git a/tensorflow_probability/python/bijectors/softfloor_test.py b/tensorflow_probability/python/bijectors/softfloor_test.py index 413911fb38..830b987741 100644 --- a/tensorflow_probability/python/bijectors/softfloor_test.py +++ b/tensorflow_probability/python/bijectors/softfloor_test.py @@ -19,15 +19,16 @@ from __future__ import print_function # Dependency imports -import numpy as np - -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow_probability.python.math import value_and_gradient + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -126,11 +127,11 @@ def testBijectorForwardGradient(self): self.assertAllClose(_softfloor_grad_np(x_np, 1.2), grad) -class SoftFloor32Test(_SoftFloorBijectorBase, tf.test.TestCase): +class SoftFloor32Test(_SoftFloorBijectorBase, test_case.TestCase): dtype = np.float32 -class SoftFloor64Test(_SoftFloorBijectorBase, tf.test.TestCase): +class SoftFloor64Test(_SoftFloorBijectorBase, test_case.TestCase): dtype = np.float64 diff --git a/tensorflow_probability/python/bijectors/softmax_centered_test.py b/tensorflow_probability/python/bijectors/softmax_centered_test.py index b61ddb1882..a4330c06ce 100644 --- a/tensorflow_probability/python/bijectors/softmax_centered_test.py +++ b/tensorflow_probability/python/bijectors/softmax_centered_test.py @@ -19,14 +19,16 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -34,7 +36,7 @@ @test_util.run_all_in_graph_and_eager_modes -class SoftmaxCenteredBijectorTest(tf.test.TestCase): +class SoftmaxCenteredBijectorTest(test_case.TestCase): """Tests correctness of the Y = g(X) = exp(X) / sum(exp(X)) transformation.""" def testBijectorVector(self): diff --git a/tensorflow_probability/python/bijectors/softplus_test.py b/tensorflow_probability/python/bijectors/softplus_test.py index 9e1f5b6d30..bc9f883cd2 100644 --- a/tensorflow_probability/python/bijectors/softplus_test.py +++ b/tensorflow_probability/python/bijectors/softplus_test.py @@ -19,18 +19,20 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top rng = 
np.random.RandomState(42) @test_util.run_all_in_graph_and_eager_modes -class SoftplusBijectorTest(tf.test.TestCase): +class SoftplusBijectorTest(test_case.TestCase): """Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation.""" def _softplus(self, x): diff --git a/tensorflow_probability/python/bijectors/softsign_test.py b/tensorflow_probability/python/bijectors/softsign_test.py index d9d5c4cd79..b575e5dfd1 100644 --- a/tensorflow_probability/python/bijectors/softsign_test.py +++ b/tensorflow_probability/python/bijectors/softsign_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SoftsignBijectorTest(tf.test.TestCase): +class SoftsignBijectorTest(test_case.TestCase): """Tests the correctness of the Y = g(X) = X / (1 + |X|) transformation.""" def _softsign(self, x): diff --git a/tensorflow_probability/python/bijectors/square_test.py b/tensorflow_probability/python/bijectors/square_test.py index 55ddc46574..bd2a940c87 100644 --- a/tensorflow_probability/python/bijectors/square_test.py +++ b/tensorflow_probability/python/bijectors/square_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SquareBijectorTest(tf.test.TestCase): +class SquareBijectorTest(test_case.TestCase): """Tests the correctness of the Y = X ** 2 transformation.""" def testBijectorScalar(self): diff --git a/tensorflow_probability/python/bijectors/tanh_test.py b/tensorflow_probability/python/bijectors/tanh_test.py index d809507fc5..08ea4419fb 100644 --- a/tensorflow_probability/python/bijectors/tanh_test.py +++ b/tensorflow_probability/python/bijectors/tanh_test.py @@ -19,16 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class TanhBijectorTest(tf.test.TestCase): +class TanhBijectorTest(test_case.TestCase): """Tests correctness of the Y = g(X) = tanh(X) transformation.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/transform_diagonal_test.py b/tensorflow_probability/python/bijectors/transform_diagonal_test.py index e6505080c7..adefdaa8e1 100644 --- a/tensorflow_probability/python/bijectors/transform_diagonal_test.py +++ b/tensorflow_probability/python/bijectors/transform_diagonal_test.py @@ -19,6 +19,7 @@ from __future__ import print_function # Dependency imports + 
from absl import logging import hypothesis as hp import hypothesis.strategies as hps @@ -28,7 +29,9 @@ from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -37,7 +40,7 @@ def _preserves_vector_dim(dim): @test_util.run_all_in_graph_and_eager_modes -class TransformDiagonalBijectorTest(tf.test.TestCase): +class TransformDiagonalBijectorTest(test_case.TestCase): """Tests correctness of the TransformDiagonal bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/bijectors/transpose_test.py b/tensorflow_probability/python/bijectors/transpose_test.py index d1c1e81571..fa9f8efe71 100644 --- a/tensorflow_probability/python/bijectors/transpose_test.py +++ b/tensorflow_probability/python/bijectors/transpose_test.py @@ -21,11 +21,13 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -51,9 +53,9 @@ def testTransposeFromPerm(self): actual_y = tf.constant(actual_y_) perm = tf.constant(perm_) else: - actual_x = tf.compat.v1.placeholder_with_default(actual_x_, shape=None) - actual_y = tf.compat.v1.placeholder_with_default(actual_y_, shape=None) - perm = tf.compat.v1.placeholder_with_default(perm_, shape=[3]) + actual_x = tf1.placeholder_with_default(actual_x_, shape=None) + actual_y = tf1.placeholder_with_default(actual_y_, shape=None) + perm = tf1.placeholder_with_default(perm_, shape=[3]) bijector = tfb.Transpose(perm=perm, validate_args=True) y = bijector.forward(actual_x) @@ -88,8 +90,8 @@ def testTransposeFromEventNdim(self): actual_y = tf.constant(actual_y_) rightmost_transposed_ndims = tf.constant(rightmost_transposed_ndims_) else: - actual_x = tf.compat.v1.placeholder_with_default(actual_x_, shape=None) - actual_y = tf.compat.v1.placeholder_with_default(actual_y_, shape=None) + actual_x = tf1.placeholder_with_default(actual_x_, shape=None) + actual_y = tf1.placeholder_with_default(actual_y_, shape=None) rightmost_transposed_ndims = tf.constant(rightmost_transposed_ndims_) bijector = tfb.Transpose( @@ -116,7 +118,7 @@ def testInvalidPermException(self): else: with self.assertRaisesOpError(msg): bijector = tfb.Transpose( - perm=tf.compat.v1.placeholder_with_default([1, 2], shape=[2]), + perm=tf1.placeholder_with_default([1, 2], shape=[2]), validate_args=True) self.evaluate(bijector.forward([[0, 1]])) @@ -136,14 +138,14 @@ def testTransformedDist(self): def testEventShapes(self): shape_static = [5, 4, 3, 2] - shape_dynamic = tf.compat.v1.placeholder_with_default( + shape_dynamic = tf1.placeholder_with_default( tf.constant(shape_static), shape=None) def make_bijector(perm=None, rightmost_transposed_ndims=None): if perm is not None: perm = tf.convert_to_tensor(value=perm) if not self.is_static: - perm = 
tf.compat.v1.placeholder_with_default(perm, shape=perm.shape) + perm = tf1.placeholder_with_default(perm, shape=perm.shape) return tfb.Transpose( perm, rightmost_transposed_ndims=rightmost_transposed_ndims) @@ -191,8 +193,8 @@ def testPartialStaticPermEventShapes(self): if tf.executing_eagerly(): return # this test is not interesting in eager. perm = tf.convert_to_tensor(value=[ tf.constant(2), - tf.compat.v1.placeholder_with_default(0, []), - tf.compat.v1.placeholder_with_default(1, []) + tf1.placeholder_with_default(0, []), + tf1.placeholder_with_default(1, []) ]) self.assertAllEqual([2, None, None], tf.get_static_value( perm, partial=True)) @@ -205,7 +207,7 @@ def testPartialStaticPermEventShapes(self): # Process of elimination should allow us to deduce one non-static perm idx. perm = tf.convert_to_tensor(value=[ tf.constant(2), - tf.compat.v1.placeholder_with_default(0, []), + tf1.placeholder_with_default(0, []), tf.constant(1) ]) self.assertAllEqual([2, None, 1], tf.get_static_value(perm, partial=True)) @@ -215,12 +217,12 @@ def testPartialStaticPermEventShapes(self): @test_util.run_all_in_graph_and_eager_modes -class TransposeBijectorDynamicTest(_TransposeBijectorTest, tf.test.TestCase): +class TransposeBijectorDynamicTest(_TransposeBijectorTest, test_case.TestCase): is_static = False @test_util.run_all_in_graph_and_eager_modes -class TransposeBijectorStaticTest(_TransposeBijectorTest, tf.test.TestCase): +class TransposeBijectorStaticTest(_TransposeBijectorTest, test_case.TestCase): is_static = True diff --git a/tensorflow_probability/python/bijectors/weibull_test.py b/tensorflow_probability/python/bijectors/weibull_test.py index bf2c2a8778..3e17c9546c 100644 --- a/tensorflow_probability/python/bijectors/weibull_test.py +++ b/tensorflow_probability/python/bijectors/weibull_test.py @@ -19,18 +19,20 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb - from tensorflow_probability.python.bijectors import bijector_test_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class WeibullBijectorTest(tf.test.TestCase): +class WeibullBijectorTest(test_case.TestCase): """Tests correctness of the weibull bijector.""" def testBijector(self): diff --git a/tensorflow_probability/python/debugging/benchmarking/benchmark_tf_function.py b/tensorflow_probability/python/debugging/benchmarking/benchmark_tf_function.py index b223d6e941..5ff90814a9 100644 --- a/tensorflow_probability/python/debugging/benchmarking/benchmark_tf_function.py +++ b/tensorflow_probability/python/debugging/benchmarking/benchmark_tf_function.py @@ -33,7 +33,7 @@ # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf RUNTIME_EAGER = 'eager' RUNTIME_FUNCTION = 'function/graph' diff --git a/tensorflow_probability/python/distributions/BUILD b/tensorflow_probability/python/distributions/BUILD index 09957ef139..e0f79269bf 100644 --- a/tensorflow_probability/python/distributions/BUILD +++ b/tensorflow_probability/python/distributions/BUILD @@ -1376,6 +1376,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", 
"//tensorflow_probability/python/internal:test_util", ], ) @@ -1388,6 +1389,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1506,6 +1508,7 @@ py_test( deps = [ ":deprecated_linalg", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1556,6 +1559,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1618,6 +1622,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1631,6 +1636,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/positive_semidefinite_kernels", ], ) @@ -1675,6 +1681,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1730,6 +1737,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1758,6 +1766,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1783,6 +1792,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1796,6 +1806,7 @@ py_test( ":normal", # tensorflow dep, "//tensorflow_probability/python/internal:reparameterization", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -1819,6 +1830,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1845,6 +1857,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1917,6 +1930,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1931,6 +1945,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -1981,6 +1996,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2010,6 +2026,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -2023,6 +2040,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + 
"//tensorflow_probability/python/internal:test_case", ], ) @@ -2039,6 +2057,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2052,6 +2071,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2076,6 +2096,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -2129,6 +2150,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2172,6 +2194,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2199,6 +2222,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2238,6 +2262,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/positive_semidefinite_kernels", ], ) @@ -2252,6 +2277,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2294,6 +2320,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2303,11 +2330,11 @@ py_test( size = "small", srcs = ["variational_gaussian_process_test.py"], deps = [ - #":distributions", # numpy dep, # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -2319,6 +2346,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2331,6 +2359,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2343,6 +2372,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2355,6 +2385,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2368,6 +2399,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -2383,6 +2415,7 @@ py_test( "//tensorflow_probability", "//tensorflow_probability/python/internal:dtype_util", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", 
"//tensorflow_probability/python/internal:test_util", ], ) @@ -2412,6 +2445,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2448,6 +2482,7 @@ py_library( "//tensorflow_probability/python/internal:hypothesis_testlib", "//tensorflow_probability/python/internal:tensor_util", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -2460,7 +2495,10 @@ py_test( main = "distribution_properties_test.py", shard_count = 10, tags = ["hypothesis"], - deps = [":distribution_properties_testlib"], + deps = [ + ":distribution_properties_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) py_test( @@ -2471,7 +2509,10 @@ py_test( main = "distribution_properties_test.py", shard_count = 10, tags = ["hypothesis"], - deps = [":distribution_properties_testlib"], + deps = [ + ":distribution_properties_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) test_suite( diff --git a/tensorflow_probability/python/distributions/autoregressive_test.py b/tensorflow_probability/python/distributions/autoregressive_test.py index 23a18ea245..9dbb8547bc 100644 --- a/tensorflow_probability/python/distributions/autoregressive_test.py +++ b/tensorflow_probability/python/distributions/autoregressive_test.py @@ -17,20 +17,21 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class AutogressiveTest(tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): """Tests the Autoregressive distribution.""" def setUp(self): diff --git a/tensorflow_probability/python/distributions/batch_reshape_test.py b/tensorflow_probability/python/distributions/batch_reshape_test.py index 29ddac5514..ed8f1f3f82 100644 --- a/tensorflow_probability/python/distributions/batch_reshape_test.py +++ b/tensorflow_probability/python/distributions/batch_reshape_test.py @@ -19,16 +19,15 @@ from __future__ import print_function # Dependency imports -import numpy as np +import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes @@ -591,14 +590,14 @@ def test_broadcasting_explicitly_unsupported(self): 
@test_util.run_all_in_graph_and_eager_modes -class BatchReshapeStaticTest(_BatchReshapeTest, tf.test.TestCase): +class BatchReshapeStaticTest(_BatchReshapeTest, test_case.TestCase): dtype = np.float32 is_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class BatchReshapeDynamicTest(_BatchReshapeTest, tf.test.TestCase): +class BatchReshapeDynamicTest(_BatchReshapeTest, test_case.TestCase): dtype = np.float64 is_static_shape = False diff --git a/tensorflow_probability/python/distributions/bernoulli_test.py b/tensorflow_probability/python/distributions/bernoulli_test.py index 4c511e58a2..5f364dcc7c 100644 --- a/tensorflow_probability/python/distributions/bernoulli_test.py +++ b/tensorflow_probability/python/distributions/bernoulli_test.py @@ -18,21 +18,19 @@ from __future__ import division from __future__ import print_function - # Dependency imports + import numpy as np from scipy import special as sp_special - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import def make_bernoulli(batch_shape, dtype=tf.int32): @@ -326,7 +324,7 @@ def __getitem__(self, slices): @test_util.run_all_in_graph_and_eager_modes -class BernoulliSlicingTest(tf.test.TestCase): +class BernoulliSlicingTest(test_case.TestCase): def testScalarSlice(self): logits = self.evaluate(tf.random.normal([])) diff --git a/tensorflow_probability/python/distributions/binomial_test.py b/tensorflow_probability/python/distributions/binomial_test.py index 1feb43d8a5..aad3f14a02 100644 --- a/tensorflow_probability/python/distributions/binomial_test.py +++ b/tensorflow_probability/python/distributions/binomial_test.py @@ -17,20 +17,20 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class BinomialTest(tf.test.TestCase): +class BinomialTest(test_case.TestCase): def setUp(self): self._rng = np.random.RandomState(42) diff --git a/tensorflow_probability/python/distributions/deprecated_linalg_test.py b/tensorflow_probability/python/distributions/deprecated_linalg_test.py index 2a8fb50bb8..e30bf6de11 100644 --- a/tensorflow_probability/python/distributions/deprecated_linalg_test.py +++ b/tensorflow_probability/python/distributions/deprecated_linalg_test.py @@ -21,11 +21,12 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.deprecated_linalg import tridiag +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import 
@test_util.run_all_in_graph_and_eager_modes -class TridiagTest(tf.test.TestCase): +class TridiagTest(test_case.TestCase): def testWorksCorrectlyNoBatches(self): self.assertAllEqual( diff --git a/tensorflow_probability/python/distributions/distribution.py b/tensorflow_probability/python/distributions/distribution.py index 686ac934df..67dc7cd44c 100644 --- a/tensorflow_probability/python/distributions/distribution.py +++ b/tensorflow_probability/python/distributions/distribution.py @@ -448,11 +448,11 @@ def __init__(self, Args: dtype: The type of the event samples. `None` implies no type-enforcement. reparameterization_type: Instance of `ReparameterizationType`. - If `tfd.FULLY_REPARAMETERIZED`, this - `Distribution` can be reparameterized in terms of some standard - distribution with a function whose Jacobian is constant for the support - of the standard distribution. If `tfd.NOT_REPARAMETERIZED`, - then no such reparameterization is available. + If `tfd.FULLY_REPARAMETERIZED`, then samples from the distribution are + fully reparameterized, and straight-through gradients are supported. + If `tfd.NOT_REPARAMETERIZED`, then samples from the distribution are not + fully reparameterized, and straight-through gradients are either + partially unsupported or are not supported at all. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect diff --git a/tensorflow_probability/python/distributions/distribution_properties_test.py b/tensorflow_probability/python/distributions/distribution_properties_test.py index 6db2060c37..50a6467de8 100644 --- a/tensorflow_probability/python/distributions/distribution_properties_test.py +++ b/tensorflow_probability/python/distributions/distribution_properties_test.py @@ -32,15 +32,18 @@ import numpy as np import six import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp + +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import tensor_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions flags.DEFINE_enum('tf_mode', 'graph', ['eager', 'graph'], 'TF execution mode to use') @@ -678,7 +681,7 @@ def get_event_dim(dist): @test_util.run_all_in_graph_and_eager_modes -class DistributionParamsAreVarsTest(parameterized.TestCase, tf.test.TestCase): +class DistributionParamsAreVarsTest(parameterized.TestCase, test_case.TestCase): @parameterized.named_parameters( {'testcase_name': dname, 'dist_name': dname} @@ -852,7 +855,7 @@ def no_tf_rank_errors(): @test_util.run_all_in_graph_and_eager_modes -class ReproducibilityTest(parameterized.TestCase, tf.test.TestCase): +class ReproducibilityTest(parameterized.TestCase, test_case.TestCase): @parameterized.named_parameters( {'testcase_name': dname, 'dist_name': dname} @@ -872,7 +875,7 @@ def testDistribution(self, dist_name, data): @test_util.run_all_in_graph_and_eager_modes -class 
DistributionSlicingTest(tf.test.TestCase): +class DistributionSlicingTest(test_case.TestCase): def _test_slicing(self, data, dist): strm = tfp_test_util.test_seed_stream() @@ -975,7 +978,6 @@ def testDistributions(self, data): def disabled_testFailureCase(self): # TODO(b/140229057): This test should pass. - tfb = tfp.bijectors dist = tfd.Chi(df=np.float32(27.744131)) dist = tfd.TransformedDistribution( bijector=tfb.NormalCDF(), distribution=dist, batch_shape=[4]) diff --git a/tensorflow_probability/python/distributions/distribution_test.py b/tensorflow_probability/python/distributions/distribution_test.py index 90a3b79f6f..510fdafdc6 100644 --- a/tensorflow_probability/python/distributions/distribution_test.py +++ b/tensorflow_probability/python/distributions/distribution_test.py @@ -19,16 +19,15 @@ import collections # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top +from tensorflow_probability.python.internal import test_case - -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top class TupleDistribution(tfd.Distribution): @@ -109,7 +108,7 @@ def event_shape(self): @test_util.run_all_in_graph_and_eager_modes -class DistributionStrReprTest(tf.test.TestCase): +class DistributionStrReprTest(test_case.TestCase): def testStrWorksCorrectlyScalar(self): normal = tfd.Normal(loc=np.float16(0), scale=1) @@ -282,7 +281,7 @@ def testReprWorksCorrectlyNamedTupleDistribution(self): @test_util.run_all_in_graph_and_eager_modes -class DistributionTest(tf.test.TestCase): +class DistributionTest(test_case.TestCase): def testParamShapesAndFromParams(self): classes = [ @@ -522,7 +521,7 @@ def _mean(self): return self._mean_ -class ParametersTest(tf.test.TestCase): +class ParametersTest(test_case.TestCase): def testParameters(self): d = Dummy(1., arg2=2.) @@ -548,7 +547,7 @@ def normal_differential_entropy(scale): @test_util.run_all_in_graph_and_eager_modes -class TfModuleTest(tf.test.TestCase): +class TfModuleTest(test_case.TestCase): def test_variable_tracking_works(self): scale = tf.Variable(1.) 
@@ -568,7 +567,7 @@ def test_gradient(self): @test_util.run_all_in_graph_and_eager_modes -class ConditionalDistributionTest(tf.test.TestCase): +class ConditionalDistributionTest(test_case.TestCase): def _GetFakeDistribution(self): class _FakeDistribution(tfd.Distribution): diff --git a/tensorflow_probability/python/distributions/finite_discrete_test.py b/tensorflow_probability/python/distributions/finite_discrete_test.py index 838f604f11..9bacb7958b 100644 --- a/tensorflow_probability/python/distributions/finite_discrete_test.py +++ b/tensorflow_probability/python/distributions/finite_discrete_test.py @@ -21,12 +21,10 @@ import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import test_case -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes @@ -387,32 +385,32 @@ def testParamTensorFromProbs(self): class FiniteDiscreteValidateArgsStaticShapeTest(FiniteDiscreteValidateArgsTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = True class FiniteDiscreteValidateArgsDynamicShapeTest(FiniteDiscreteValidateArgsTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = False class FiniteDiscreteScalarStaticShapeTest(FiniteDiscreteScalarTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = True class FiniteDiscreteScalarDynamicShapeTest(FiniteDiscreteScalarTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = False class FiniteDiscreteVectorStaticShapeTest(FiniteDiscreteVectorTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = True class FiniteDiscreteVectorDynamicShapeTest(FiniteDiscreteVectorTest, - tf.test.TestCase): + test_case.TestCase): use_static_shape = False diff --git a/tensorflow_probability/python/distributions/gamma_gamma_test.py b/tensorflow_probability/python/distributions/gamma_gamma_test.py index 6a79e4b14b..6484e66511 100644 --- a/tensorflow_probability/python/distributions/gamma_gamma_test.py +++ b/tensorflow_probability/python/distributions/gamma_gamma_test.py @@ -17,17 +17,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class GammaGammaTest(tf.test.TestCase): +class GammaGammaTest(test_case.TestCase): def testGammaGammaShape(self): gg = tfd.GammaGamma( diff --git a/tensorflow_probability/python/distributions/gaussian_process_regression_model.py b/tensorflow_probability/python/distributions/gaussian_process_regression_model.py index f62f5758be..8f0934b86d 100644 --- a/tensorflow_probability/python/distributions/gaussian_process_regression_model.py +++ b/tensorflow_probability/python/distributions/gaussian_process_regression_model.py @@ -21,11 +21,11 @@ # Dependency imports import tensorflow.compat.v2 as tf -from 
tensorflow_probability.python import positive_semidefinite_kernels as tfpk from tensorflow_probability.python import util as tfp_util from tensorflow_probability.python.distributions import gaussian_process from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.math import psd_kernels as tfpk __all__ = [ 'GaussianProcessRegressionModel', diff --git a/tensorflow_probability/python/distributions/gaussian_process_test.py b/tensorflow_probability/python/distributions/gaussian_process_test.py index e5ba727009..c7d2adcb8f 100644 --- a/tensorflow_probability/python/distributions/gaussian_process_test.py +++ b/tensorflow_probability/python/distributions/gaussian_process_test.py @@ -25,6 +25,7 @@ from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python import positive_semidefinite_kernels as psd_kernels from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -227,12 +228,12 @@ def testMarginalHasCorrectTypes(self): @test_util.run_all_in_graph_and_eager_modes -class GaussianProcessStaticTest(_GaussianProcessTest, tf.test.TestCase): +class GaussianProcessStaticTest(_GaussianProcessTest, test_case.TestCase): is_static = True @test_util.run_all_in_graph_and_eager_modes -class GaussianProcessDynamicTest(_GaussianProcessTest, tf.test.TestCase): +class GaussianProcessDynamicTest(_GaussianProcessTest, test_case.TestCase): is_static = False diff --git a/tensorflow_probability/python/distributions/geometric_test.py b/tensorflow_probability/python/distributions/geometric_test.py index 458fae4abb..71c32b8bf8 100644 --- a/tensorflow_probability/python/distributions/geometric_test.py +++ b/tensorflow_probability/python/distributions/geometric_test.py @@ -19,14 +19,15 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -34,7 +35,7 @@ # represents the "Shifted" Geometric distribution. Hence, loc=-1 is passed # in to each scipy function for testing. 
@test_util.run_all_in_graph_and_eager_modes -class GeometricTest(tf.test.TestCase): +class GeometricTest(test_case.TestCase): def testGeometricShape(self): probs = tf.constant([.1] * 5) diff --git a/tensorflow_probability/python/distributions/hidden_markov_model_test.py b/tensorflow_probability/python/distributions/hidden_markov_model_test.py index 80999b1e57..dae9fde2ae 100644 --- a/tensorflow_probability/python/distributions/hidden_markov_model_test.py +++ b/tensorflow_probability/python/distributions/hidden_markov_model_test.py @@ -19,14 +19,16 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top # pylint: disable=no-member @@ -36,7 +38,7 @@ class _HiddenMarkovModelTest( tfp_test_util.VectorDistributionTestHelpers, tfp_test_util.DiscreteScalarDistributionTestHelpers, - tf.test.TestCase, + test_case.TestCase, parameterized.TestCase): @staticmethod diff --git a/tensorflow_probability/python/distributions/independent_test.py b/tensorflow_probability/python/distributions/independent_test.py index 38737f7565..10ba7ba0e8 100644 --- a/tensorflow_probability/python/distributions/independent_test.py +++ b/tensorflow_probability/python/distributions/independent_test.py @@ -19,23 +19,21 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats as sp_stats - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top - -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class IndependentDistributionTest(tf.test.TestCase): +class IndependentDistributionTest(test_case.TestCase): def setUp(self): self._rng = np.random.RandomState(42) diff --git a/tensorflow_probability/python/distributions/internal/BUILD b/tensorflow_probability/python/distributions/internal/BUILD index cebad95ad2..b784b5582c 100644 --- a/tensorflow_probability/python/distributions/internal/BUILD +++ b/tensorflow_probability/python/distributions/internal/BUILD @@ -53,6 +53,7 @@ py_test( ":moving_stats", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -79,6 +80,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -124,6 +126,7 @@ py_test( ":statistical_testing", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -144,5 +147,6 @@ py_test( ":slicing", # tensorflow dep, 
"//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py b/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py index 29fcc542cb..f3889de904 100644 --- a/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py +++ b/tensorflow_probability/python/distributions/internal/correlation_matrix_volumes_test.py @@ -34,10 +34,12 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.internal import correlation_matrix_volumes_lib as corr from tensorflow_probability.python.distributions.internal import statistical_testing as st +from tensorflow_probability.python.internal import test_case # NxN correlation matrices are determined by the N*(N-1)/2 @@ -77,7 +79,7 @@ def four_by_four_volume(): # method," Journal of Multivariate Analysis 100 (2009), pp 1989-2001. -class CorrelationMatrixVolumesTest(tf.test.TestCase): +class CorrelationMatrixVolumesTest(test_case.TestCase): def testRejection2D(self): num_samples = int(1e5) # Chosen for a small min detectable discrepancy @@ -91,7 +93,7 @@ def testRejection2D(self): chk1 = st.assert_true_mean_equal_by_dkwm( rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes, false_fail_rate=1e-6) - chk2 = tf.compat.v1.assert_less( + chk2 = tf1.assert_less( st.min_discrepancy_of_true_means_detectable_by_dkwm( num_samples, low=0., @@ -115,7 +117,7 @@ def testRejection3D(self): chk1 = st.assert_true_mean_equal_by_dkwm( rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes, false_fail_rate=1e-6) - chk2 = tf.compat.v1.assert_less( + chk2 = tf1.assert_less( st.min_discrepancy_of_true_means_detectable_by_dkwm( num_samples, low=0., @@ -139,7 +141,7 @@ def testRejection4D(self): chk1 = st.assert_true_mean_equal_by_dkwm( rej_weights, low=0., high=rej_proposal_volume, expected=exact_volumes, false_fail_rate=1e-6) - chk2 = tf.compat.v1.assert_less( + chk2 = tf1.assert_less( st.min_discrepancy_of_true_means_detectable_by_dkwm( num_samples, low=0., diff --git a/tensorflow_probability/python/distributions/internal/moving_stats.py b/tensorflow_probability/python/distributions/internal/moving_stats.py index 501cb5f166..0f0bd2332a 100644 --- a/tensorflow_probability/python/distributions/internal/moving_stats.py +++ b/tensorflow_probability/python/distributions/internal/moving_stats.py @@ -17,7 +17,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import @@ -74,10 +75,10 @@ def assign_moving_mean_variance( _Technical Report_, 2009. 
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf """ - with tf.compat.v1.name_scope(name, "assign_moving_mean_variance", + with tf1.name_scope(name, "assign_moving_mean_variance", [variance_var, mean_var, value, decay]): - with tf.compat.v1.colocate_with(variance_var): - with tf.compat.v1.colocate_with(mean_var): + with tf1.colocate_with(variance_var): + with tf1.colocate_with(mean_var): base_dtype = mean_var.dtype.base_dtype if not base_dtype.is_floating: raise TypeError( @@ -153,7 +154,7 @@ def assign_log_moving_mean_exp( TypeError: if `log_mean_exp_var`, `log_value`, `decay` have different `base_dtype`. """ - with tf.compat.v1.name_scope(name, "assign_log_moving_mean_exp", + with tf1.name_scope(name, "assign_log_moving_mean_exp", [log_mean_exp_var, log_value, decay]): # We want to update the variable in a numerically stable and lock-free way. # To do this, observe that variable `x` updated by `v` is: @@ -161,7 +162,7 @@ def assign_log_moving_mean_exp( # = log(exp(x + log(w)) + exp(v + log1p(-w))) # = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w))) # = x + lse([log(w), v - x + log1p(-w)]) - with tf.compat.v1.colocate_with(log_mean_exp_var): + with tf1.colocate_with(log_mean_exp_var): base_dtype = log_mean_exp_var.dtype.base_dtype if not base_dtype.is_floating: raise TypeError( @@ -224,7 +225,7 @@ def moving_mean_variance(value, decay, name=None): _Technical Report_, 2009. http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf """ - with tf.compat.v1.variable_scope(name, "moving_mean_variance", + with tf1.variable_scope(name, "moving_mean_variance", [value, decay]): value = tf.convert_to_tensor(value=value, name="value") base_dtype = value.dtype.base_dtype @@ -233,11 +234,11 @@ def moving_mean_variance(value, decay, name=None): "value.base_dtype({}) does not have float type `dtype`.".format( base_dtype.name)) decay = tf.convert_to_tensor(value=decay, dtype=base_dtype, name="decay") - variance_var = tf.compat.v2.Variable( + variance_var = tf.Variable( name="moving_variance", initial_value=tf.zeros(shape=value.shape, dtype=value.dtype), trainable=False) - mean_var = tf.compat.v2.Variable( + mean_var = tf.Variable( name="moving_mean", initial_value=tf.zeros(shape=value.shape, dtype=value.dtype), trainable=False) diff --git a/tensorflow_probability/python/distributions/internal/moving_stats_test.py b/tensorflow_probability/python/distributions/internal/moving_stats_test.py index 6b8d008f5c..7c83c050e0 100644 --- a/tensorflow_probability/python/distributions/internal/moving_stats_test.py +++ b/tensorflow_probability/python/distributions/internal/moving_stats_test.py @@ -23,11 +23,12 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.internal import moving_stats +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class MovingReduceMeanVarianceTest(tf.test.TestCase): +class MovingReduceMeanVarianceTest(test_case.TestCase): def test_assign_moving_mean_variance(self): shape = [1, 2] @@ -94,7 +95,7 @@ def cond(it): @test_util.run_all_in_graph_and_eager_modes -class MovingLogExponentialMovingMeanExpTest(tf.test.TestCase): +class MovingLogExponentialMovingMeanExpTest(test_case.TestCase): def test_assign_log_moving_mean_exp(self): shape = [1, 2] diff --git a/tensorflow_probability/python/distributions/internal/slicing_test.py 
b/tensorflow_probability/python/distributions/internal/slicing_test.py index 54ca8667fd..f2377f5896 100644 --- a/tensorflow_probability/python/distributions/internal/slicing_test.py +++ b/tensorflow_probability/python/distributions/internal/slicing_test.py @@ -18,11 +18,12 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.internal import slicing from tensorflow_probability.python.internal import tensorshape_util - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -36,7 +37,7 @@ def __getitem__(self, slices): @test_util.run_all_in_graph_and_eager_modes -class SlicingTest(tf.test.TestCase): +class SlicingTest(test_case.TestCase): def test_single_param_slice_withstep_broadcastdim(self): event_dim = 3 @@ -120,9 +121,9 @@ def test_single_param_slice_int_broadcastdim(self): self.assertAllEqual((7, 5, 4, 3), self.evaluate(sliced).shape) def test_single_param_slice_tensor(self): - param = tf.compat.v1.placeholder_with_default( + param = tf1.placeholder_with_default( tf.zeros([7, 6, 5, 4, 3]), shape=None) - idx = tf.compat.v1.placeholder_with_default( + idx = tf1.placeholder_with_default( tf.constant(2, dtype=tf.int32), shape=[]) sliced = slicing._slice_single_param( param, @@ -132,9 +133,9 @@ def test_single_param_slice_tensor(self): self.assertAllEqual((7, 5, 4, 3), self.evaluate(sliced).shape) def test_single_param_slice_tensor_broadcastdim(self): - param = tf.compat.v1.placeholder_with_default( + param = tf1.placeholder_with_default( tf.zeros([7, 1, 5, 4, 3]), shape=None) - idx = tf.compat.v1.placeholder_with_default( + idx = tf1.placeholder_with_default( tf.constant(2, dtype=tf.int32), shape=[]) sliced = slicing._slice_single_param( param, diff --git a/tensorflow_probability/python/distributions/internal/statistical_testing.py b/tensorflow_probability/python/distributions/internal/statistical_testing.py index c6ffd6208a..a5e5ee6b8b 100644 --- a/tensorflow_probability/python/distributions/internal/statistical_testing.py +++ b/tensorflow_probability/python/distributions/internal/statistical_testing.py @@ -128,7 +128,8 @@ import functools import itertools -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import tensorshape_util @@ -211,20 +212,20 @@ def assert_true_cdf_equal_by_dkwm( check: Op that raises `InvalidArgumentError` if any expected CDF is outside the corresponding confidence envelope. 
""" - with tf.compat.v1.name_scope(name, 'assert_true_cdf_equal_by_dkwm', + with tf1.name_scope(name, 'assert_true_cdf_equal_by_dkwm', [samples, false_fail_rate]): dtype = dtype_util.common_dtype([samples, false_fail_rate], tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) false_fail_rate = tf.convert_to_tensor( value=false_fail_rate, name='false_fail_rate', dtype=dtype) - tf.compat.v1.assert_scalar(false_fail_rate) # Static shape + tf1.assert_scalar(false_fail_rate) # Static shape itemwise_false_fail_rate = _itemwise_error_rate( total_rate=false_fail_rate, param_tensors=[], samples_tensor=samples) n = tf.shape(input=samples)[0] envelope = _dkwm_cdf_envelope(n, itemwise_false_fail_rate) distance = kolmogorov_smirnov_distance(samples, cdf, left_continuous_cdf) - return tf.compat.v1.assert_less_equal( + return tf1.assert_less_equal( distance, envelope, message='Empirical CDF outside K-S envelope') @@ -272,7 +273,7 @@ def min_discrepancy_of_true_cdfs_detectable_by_dkwm( - `O(-log(false_fail_rate/K))`, and - `O(-log(false_pass_rate))`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_discrepancy_of_true_cdfs_detectable_by_dkwm', [n, false_fail_rate, false_pass_rate]): dtype = dtype_util.common_dtype( @@ -339,7 +340,7 @@ def min_num_samples_for_dkwm_cdf_test( - `O(-log(false_pass_rate))`, and - `O(1 / discrepancy[i]**2)`. """ - with tf.compat.v1.name_scope(name, 'min_num_samples_for_dkwm_cdf_test', + with tf1.name_scope(name, 'min_num_samples_for_dkwm_cdf_test', [false_fail_rate, false_pass_rate, discrepancy]): dtype = dtype_util.common_dtype( [false_fail_rate, false_pass_rate, discrepancy], tf.float32) @@ -407,7 +408,7 @@ def kolmogorov_smirnov_distance( distance: Tensor of shape B: (Absolute) Kolmogorov-Smirnov distance between the empirical and analytic CDFs. """ - with tf.compat.v1.name_scope(name, 'kolmogorov_smirnov_distance', [samples]): + with tf1.name_scope(name, 'kolmogorov_smirnov_distance', [samples]): dtype = dtype_util.common_dtype([samples], tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) samples = _move_dim_and_sort(samples) @@ -472,7 +473,7 @@ def kolmogorov_smirnov_distance_two_sample(samples1, samples2, name=None): distance: Tensor of shape B: (Absolute) Kolmogorov-Smirnov distance between the two empirical CDFs given by the samples. """ - with tf.compat.v1.name_scope(name, 'kolmogorov_smirnov_distance_two_sample', + with tf1.name_scope(name, 'kolmogorov_smirnov_distance_two_sample', [samples1, samples2]): dtype = dtype_util.common_dtype([samples1, samples2], tf.float32) samples1 = tf.convert_to_tensor( @@ -503,7 +504,7 @@ def _move_dim_and_sort(samples): def _batch_sort_vector(x, ascending=True, name=None): """Batch sort. 
Sorts the -1 dimension of each batch member independently.""" - with tf.compat.v1.name_scope(name, '_batch_sort_vector', [x]): + with tf1.name_scope(name, '_batch_sort_vector', [x]): x = tf.convert_to_tensor(value=x, name='x') n = tf.shape(input=x)[-1] if ascending: @@ -555,7 +556,7 @@ def empirical_cdfs(samples, positions, continuity='right', msg = 'Continuity value must be "left" or "right", got {}.'.format( continuity) raise ValueError(msg) - with tf.compat.v1.name_scope(name, 'empirical_cdfs', [samples, positions]): + with tf1.name_scope(name, 'empirical_cdfs', [samples, positions]): n = tf.cast(tf.shape(input=samples)[-1], dtype=dtype) indexes = tf.searchsorted( sorted_sequence=samples, values=positions, side=continuity) @@ -564,7 +565,7 @@ def empirical_cdfs(samples, positions, continuity='right', def _do_maximum_mean(samples, envelope, high, name=None): """Common code between maximum_mean and minimum_mean.""" - with tf.compat.v1.name_scope(name, 'do_maximum_mean', + with tf1.name_scope(name, 'do_maximum_mean', [samples, envelope, high]): dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) @@ -636,7 +637,7 @@ def assert_true_cdf_equal_by_dkwm_two_sample( check: Op that raises `InvalidArgumentError` if any expected CDF is outside the corresponding confidence envelope. """ - with tf.compat.v1.name_scope(name, 'assert_true_cdf_equal_by_dkwm_two_sample', + with tf1.name_scope(name, 'assert_true_cdf_equal_by_dkwm_two_sample', [samples1, samples2, false_fail_rate]): dtype = dtype_util.common_dtype( [samples1, samples2, false_fail_rate], tf.float32) @@ -646,8 +647,8 @@ def assert_true_cdf_equal_by_dkwm_two_sample( value=samples2, name='samples2', dtype=dtype) false_fail_rate = tf.convert_to_tensor( value=false_fail_rate, name='false_fail_rate', dtype=dtype) - tf.compat.v1.assert_scalar(false_fail_rate) # Static shape - compatible_samples = tf.compat.v1.assert_equal( + tf1.assert_scalar(false_fail_rate) # Static shape + compatible_samples = tf1.assert_equal( tf.shape(input=samples1)[1:], tf.shape(input=samples2)[1:]) with tf.control_dependencies([compatible_samples]): @@ -659,7 +660,7 @@ def assert_true_cdf_equal_by_dkwm_two_sample( n2 = tf.shape(input=samples2)[0] envelope2 = _dkwm_cdf_envelope(n2, itemwise_false_fail_rate) distance = kolmogorov_smirnov_distance_two_sample(samples1, samples2) - return tf.compat.v1.assert_less_equal( + return tf1.assert_less_equal( distance, envelope1 + envelope2, message='Empirical CDFs outside joint K-S envelope') @@ -709,7 +710,7 @@ def min_discrepancy_of_true_cdfs_detectable_by_dkwm_two_sample( - `O(-log(false_fail_rate/K))`, and - `O(-log(false_pass_rate))`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_discrepancy_of_true_cdfs_detectable_by_dkwm_two_sample', [n1, n2, false_fail_rate, false_pass_rate]): dtype = dtype_util.common_dtype( @@ -765,7 +766,7 @@ def min_num_samples_for_dkwm_cdf_two_sample_test( - `O(-log(false_pass_rate))`, and - `O(1 / discrepancy[i]**2)`. """ - with tf.compat.v1.name_scope(name, + with tf1.name_scope(name, 'min_num_samples_for_dkwm_cdf_two_sample_test', [discrepancy, false_fail_rate, false_pass_rate]): dtype = dtype_util.common_dtype( @@ -814,7 +815,7 @@ def _maximum_mean(samples, envelope, high, name=None): InvalidArgumentError: If some `sample` is found to be larger than the corresponding `high`. 
""" - with tf.compat.v1.name_scope(name, 'maximum_mean', [samples, envelope, high]): + with tf1.name_scope(name, 'maximum_mean', [samples, envelope, high]): dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) envelope = tf.convert_to_tensor( @@ -823,7 +824,7 @@ def _maximum_mean(samples, envelope, high, name=None): xmax = tf.reduce_max(input_tensor=samples, axis=[0]) msg = 'Given sample maximum value exceeds expectations' - check_op = tf.compat.v1.assert_less_equal(xmax, high, message=msg) + check_op = tf1.assert_less_equal(xmax, high, message=msg) with tf.control_dependencies([check_op]): return tf.identity(_do_maximum_mean(samples, envelope, high)) @@ -861,7 +862,7 @@ def _minimum_mean(samples, envelope, low, name=None): InvalidArgumentError: If some `sample` is found to be smaller than the corresponding `low`. """ - with tf.compat.v1.name_scope(name, 'minimum_mean', [samples, envelope, low]): + with tf1.name_scope(name, 'minimum_mean', [samples, envelope, low]): dtype = dtype_util.common_dtype([samples, envelope, low], tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) envelope = tf.convert_to_tensor( @@ -870,7 +871,7 @@ def _minimum_mean(samples, envelope, low, name=None): xmin = tf.reduce_min(input_tensor=samples, axis=[0]) msg = 'Given sample minimum value falls below expectations' - check_op = tf.compat.v1.assert_greater_equal(xmin, low, message=msg) + check_op = tf1.assert_greater_equal(xmin, low, message=msg) with tf.control_dependencies([check_op]): return - _do_maximum_mean(-samples, envelope, -low) @@ -903,7 +904,7 @@ def _dkwm_cdf_envelope(n, error_rate, name=None): as `O(1 / sqrt(n))`. The shape is the broadcast of `n` and `error_rate`. """ - with tf.compat.v1.name_scope(name, 'dkwm_cdf_envelope', [n, error_rate]): + with tf1.name_scope(name, 'dkwm_cdf_envelope', [n, error_rate]): n = tf.cast(n, dtype=error_rate.dtype) return tf.sqrt(-tf.math.log(error_rate / 2.) / (2. * n)) @@ -930,8 +931,8 @@ def check(t): # This rank check ensures that I don't get a wrong answer from the # _shapes_ broadcasting against each other. samples_batch_ndims = tf.size(input=samples_batch_shape) - ge = tf.compat.v1.assert_greater_equal(samples_batch_ndims, tf.rank(t)) - eq = tf.compat.v1.assert_equal(samples_batch_shape, broadcasted_batch_shape) + ge = tf1.assert_greater_equal(samples_batch_ndims, tf.rank(t)) + eq = tf1.assert_equal(samples_batch_shape, broadcasted_batch_shape) return ge, eq checks = list(itertools.chain(*[check(t) for t in parameters])) with tf.control_dependencies(checks): @@ -978,7 +979,7 @@ def true_mean_confidence_interval_by_dkwm( high: A floating-point `Tensor` of stochastic upper bounds on the true means. 
""" - with tf.compat.v1.name_scope(name, 'true_mean_confidence_interval_by_dkwm', + with tf1.name_scope(name, 'true_mean_confidence_interval_by_dkwm', [samples, low, high, error_rate]): dtype = dtype_util.common_dtype( [samples, low, high, error_rate], tf.float32) @@ -988,7 +989,7 @@ def true_mean_confidence_interval_by_dkwm( error_rate = tf.convert_to_tensor( value=error_rate, name='error_rate', dtype=dtype) samples = _check_shape_dominates(samples, [low, high]) - tf.compat.v1.assert_scalar(error_rate) # Static shape + tf1.assert_scalar(error_rate) # Static shape itemwise_error_rate = _itemwise_error_rate( total_rate=error_rate, param_tensors=[low, high], samples_tensor=samples) @@ -1002,7 +1003,7 @@ def true_mean_confidence_interval_by_dkwm( def _itemwise_error_rate( total_rate, param_tensors, samples_tensor=None, name=None): """Distributes a total error rate for a batch of assertions.""" - with tf.compat.v1.name_scope(name, 'itemwise_error_rate', + with tf1.name_scope(name, 'itemwise_error_rate', [total_rate, param_tensors, samples_tensor]): result_shape = [1] for p_tensor in param_tensors: @@ -1051,7 +1052,7 @@ def assert_true_mean_equal_by_dkwm( check: Op that raises `InvalidArgumentError` if any expected mean is outside the corresponding confidence interval. """ - with tf.compat.v1.name_scope(name, 'assert_true_mean_equal_by_dkwm', + with tf1.name_scope(name, 'assert_true_mean_equal_by_dkwm', [samples, low, high, expected, false_fail_rate]): return assert_true_mean_in_interval_by_dkwm( samples, low, high, expected, expected, false_fail_rate) @@ -1111,7 +1112,7 @@ def min_discrepancy_of_true_means_detectable_by_dkwm( - `O(-log(false_fail_rate/K))`, and - `O(-log(false_pass_rate))`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_discrepancy_of_true_means_detectable_by_dkwm', [n, low, high, false_fail_rate, false_pass_rate]): dtype = dtype_util.common_dtype( @@ -1177,7 +1178,7 @@ def min_num_samples_for_dkwm_mean_test( as `O((high[i] - low[i])**2)`, `O(-log(false_fail_rate/K))`, `O(-log(false_pass_rate))`, and `O(1 / discrepancy[i]**2)`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_num_samples_for_dkwm_mean_test', [low, high, false_fail_rate, false_pass_rate, discrepancy]): dtype = dtype_util.common_dtype( @@ -1237,7 +1238,7 @@ def assert_true_mean_in_interval_by_dkwm( interval. """ args_list = [samples, low, high, expected_low, expected_high, false_fail_rate] - with tf.compat.v1.name_scope(name, 'assert_true_mean_in_interval_by_dkwm', + with tf1.name_scope(name, 'assert_true_mean_in_interval_by_dkwm', args_list): dtype = dtype_util.common_dtype(args_list, tf.float32) samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype) @@ -1259,13 +1260,13 @@ def assert_true_mean_in_interval_by_dkwm( # By DeMorgan's law, that's also equivalent to # not (max_mean < expected_low or min_mean > expected_high), # which is a way of saying the two intervals are not disjoint. 
- check_confidence_interval_can_intersect = tf.compat.v1.assert_greater_equal( + check_confidence_interval_can_intersect = tf1.assert_greater_equal( max_mean, expected_low, message='Confidence interval does not ' 'intersect: true mean smaller than expected') with tf.control_dependencies([check_confidence_interval_can_intersect]): - return tf.compat.v1.assert_less_equal( + return tf1.assert_less_equal( min_mean, expected_high, message='Confidence interval does not ' @@ -1319,7 +1320,7 @@ def assert_true_mean_equal_by_dkwm_two_sample( intervals true for corresponding true means do not overlap. """ args_list = [samples1, low1, high1, samples2, low2, high2, false_fail_rate] - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'assert_true_mean_equal_by_dkwm_two_sample', args_list): dtype = dtype_util.common_dtype(args_list, tf.float32) samples1 = tf.convert_to_tensor( @@ -1334,7 +1335,7 @@ def assert_true_mean_equal_by_dkwm_two_sample( value=false_fail_rate, name='false_fail_rate', dtype=dtype) samples1 = _check_shape_dominates(samples1, [low1, high1]) samples2 = _check_shape_dominates(samples2, [low2, high2]) - compatible_samples = tf.compat.v1.assert_equal( + compatible_samples = tf1.assert_equal( tf.shape(input=samples1)[1:], tf.shape(input=samples2)[1:]) with tf.control_dependencies([compatible_samples]): @@ -1410,7 +1411,7 @@ def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample( """ args_list = ( [n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate]) - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_discrepancy_of_true_means_detectable_by_dkwm_two_sample', args_list): dtype = dtype_util.common_dtype(args_list, tf.float32) @@ -1482,7 +1483,7 @@ def min_num_samples_for_dkwm_mean_two_sample_test( """ args_list = ( [low1, high1, low2, high2, false_fail_rate, false_pass_rate, discrepancy]) - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'min_num_samples_for_dkwm_mean_two_sample_test', args_list): dtype = dtype_util.common_dtype(args_list, tf.float32) discrepancy = tf.convert_to_tensor( @@ -1580,7 +1581,7 @@ def assert_multivariate_true_cdf_equal_on_projections_two_sample( args_list = ( [samples1, samples2, num_projections, event_ndims, false_fail_rate]) strm = SeedStream(salt='random projections', seed=seed) - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'assert_multivariate_true_cdf_equal_on_projections_two_sample', args_list): @@ -1594,8 +1595,8 @@ def assert_multivariate_true_cdf_equal_on_projections_two_sample( value=num_projections, name='num_projections') false_fail_rate = tf.convert_to_tensor( value=false_fail_rate, name='false_fail_rate', dtype=dtype) - tf.compat.v1.assert_scalar(false_fail_rate) # Static shape - compatible_samples = tf.compat.v1.assert_equal( + tf1.assert_scalar(false_fail_rate) # Static shape + compatible_samples = tf1.assert_equal( tf.shape(input=samples1)[1:], tf.shape(input=samples2)[1:]) with tf.control_dependencies([compatible_samples]): diff --git a/tensorflow_probability/python/distributions/internal/statistical_testing_test.py b/tensorflow_probability/python/distributions/internal/statistical_testing_test.py index ea466e7bfe..64734464a7 100644 --- a/tensorflow_probability/python/distributions/internal/statistical_testing_test.py +++ b/tensorflow_probability/python/distributions/internal/statistical_testing_test.py @@ -23,20 +23,22 @@ # Dependency imports from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import 
tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.internal import statistical_testing as st +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import - # pylint: disable=g-error-prone-assert-raises + # This file is testing new assertions, which must of necessity appear in # assertRaises blocks to check that they do, in fact, raise when their terms are # violated. @test_util.run_all_in_graph_and_eager_modes -class StatisticalTestingTest(tf.test.TestCase, parameterized.TestCase): +class StatisticalTestingTest(test_case.TestCase, parameterized.TestCase): def assert_design_soundness(self, dtype, min_num_samples, min_discrepancy): thresholds = [1e-5, 1e-2, 1.1e-1, 0.9, 1., 1.02, 2., 10., 1e2, 1e5, 1e10] @@ -138,17 +140,17 @@ def test_kolmogorov_smirnov_distance(self, dtype): samples = tf.convert_to_tensor(value=samples, dtype=dtype) def cdf(x): ones = tf.ones_like(x) - answer = tf.compat.v1.where(x < 3, 0.6 * ones, ones) - answer = tf.compat.v1.where(x < 2, 0.3 * ones, answer) - answer = tf.compat.v1.where(x < 1, 0.1 * ones, answer) - return tf.compat.v1.where(x < 0, 0 * ones, answer) + answer = tf1.where(x < 3, 0.6 * ones, ones) + answer = tf1.where(x < 2, 0.3 * ones, answer) + answer = tf1.where(x < 1, 0.1 * ones, answer) + return tf1.where(x < 0, 0 * ones, answer) def left_continuous_cdf(x): ones = tf.ones_like(x) - answer = tf.compat.v1.where(x <= 3, 0.6 * ones, ones) - answer = tf.compat.v1.where(x <= 2, 0.3 * ones, answer) - answer = tf.compat.v1.where(x <= 1, 0.1 * ones, answer) - return tf.compat.v1.where(x <= 0, 0 * ones, answer) + answer = tf1.where(x <= 3, 0.6 * ones, ones) + answer = tf1.where(x <= 2, 0.3 * ones, answer) + answer = tf1.where(x <= 1, 0.1 * ones, answer) + return tf1.where(x <= 0, 0 * ones, answer) # Unlike empirical_cdfs, the samples Tensor must come in iid across the # leading dimension. 
obtained = self.evaluate(st.kolmogorov_smirnov_distance( @@ -166,17 +168,17 @@ def test_dkwm_cdf_one_sample_batch_discrete_assertion(self, dtype): samples = rng.choice(4, size=shape, p=probs).astype(dtype=dtype) def cdf(x): ones = tf.ones_like(x) - answer = tf.compat.v1.where(x < 3, 0.6 * ones, ones) - answer = tf.compat.v1.where(x < 2, 0.3 * ones, answer) - answer = tf.compat.v1.where(x < 1, 0.1 * ones, answer) - return tf.compat.v1.where(x < 0, 0 * ones, answer) + answer = tf1.where(x < 3, 0.6 * ones, ones) + answer = tf1.where(x < 2, 0.3 * ones, answer) + answer = tf1.where(x < 1, 0.1 * ones, answer) + return tf1.where(x < 0, 0 * ones, answer) def left_continuous_cdf(x): ones = tf.ones_like(x) - answer = tf.compat.v1.where(x <= 3, 0.6 * ones, ones) - answer = tf.compat.v1.where(x <= 2, 0.3 * ones, answer) - answer = tf.compat.v1.where(x <= 1, 0.1 * ones, answer) - return tf.compat.v1.where(x <= 0, 0 * ones, answer) + answer = tf1.where(x <= 3, 0.6 * ones, ones) + answer = tf1.where(x <= 2, 0.3 * ones, answer) + answer = tf1.where(x <= 1, 0.1 * ones, answer) + return tf1.where(x <= 0, 0 * ones, answer) self.evaluate(st.assert_true_cdf_equal_by_dkwm( samples, cdf, left_continuous_cdf=left_continuous_cdf, diff --git a/tensorflow_probability/python/distributions/inverse_gaussian_test.py b/tensorflow_probability/python/distributions/inverse_gaussian_test.py index 6693ad2e78..b7920db6f4 100644 --- a/tensorflow_probability/python/distributions/inverse_gaussian_test.py +++ b/tensorflow_probability/python/distributions/inverse_gaussian_test.py @@ -20,10 +20,10 @@ from scipy import stats import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -368,25 +368,25 @@ def testModifiedVariableAssertion(self): self.evaluate(inverse_gaussian.mean()) -class InverseGaussianTestStaticShapeFloat32(tf.test.TestCase, +class InverseGaussianTestStaticShapeFloat32(test_case.TestCase, _InverseGaussianTest): dtype = tf.float32 use_static_shape = True -class InverseGaussianTestDynamicShapeFloat32(tf.test.TestCase, +class InverseGaussianTestDynamicShapeFloat32(test_case.TestCase, _InverseGaussianTest): dtype = tf.float32 use_static_shape = False -class InverseGaussianTestStaticShapeFloat64(tf.test.TestCase, +class InverseGaussianTestStaticShapeFloat64(test_case.TestCase, _InverseGaussianTest): dtype = tf.float64 use_static_shape = True -class InverseGaussianTestDynamicShapeFloat64(tf.test.TestCase, +class InverseGaussianTestDynamicShapeFloat64(test_case.TestCase, _InverseGaussianTest): dtype = tf.float64 use_static_shape = False diff --git a/tensorflow_probability/python/distributions/joint_distribution_named_test.py b/tensorflow_probability/python/distributions/joint_distribution_named_test.py index 5eb46b73f0..462dc674b2 100644 --- a/tensorflow_probability/python/distributions/joint_distribution_named_test.py +++ b/tensorflow_probability/python/distributions/joint_distribution_named_test.py @@ -21,20 +21,18 @@ import collections # Dependency imports -from absl.testing import parameterized +from absl.testing import parameterized import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from 
tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import - -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class JointDistributionNamedTest(tf.test.TestCase, parameterized.TestCase): +class JointDistributionNamedTest(test_case.TestCase, parameterized.TestCase): def test_dict_sample_log_prob(self): # pylint: disable=bad-whitespace diff --git a/tensorflow_probability/python/distributions/kullback_leibler_test.py b/tensorflow_probability/python/distributions/kullback_leibler_test.py index 45e6ed1e0c..40f862a81f 100644 --- a/tensorflow_probability/python/distributions/kullback_leibler_test.py +++ b/tensorflow_probability/python/distributions/kullback_leibler_test.py @@ -23,9 +23,10 @@ from tensorflow_probability.python.distributions import kullback_leibler from tensorflow_probability.python.distributions import normal from tensorflow_probability.python.internal import reparameterization +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import - # pylint: disable=protected-access + _DIVERGENCES = kullback_leibler._DIVERGENCES _registered_kl = kullback_leibler._registered_kl @@ -33,7 +34,7 @@ @test_util.run_all_in_graph_and_eager_modes -class KLTest(tf.test.TestCase): +class KLTest(test_case.TestCase): def testRegistration(self): diff --git a/tensorflow_probability/python/distributions/kumaraswamy_test.py b/tensorflow_probability/python/distributions/kumaraswamy_test.py index a94a3b4f8d..c8d36fb034 100644 --- a/tensorflow_probability/python/distributions/kumaraswamy_test.py +++ b/tensorflow_probability/python/distributions/kumaraswamy_test.py @@ -17,19 +17,17 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import special as sp_special from scipy import stats as sp_stats - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top - -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top def _kumaraswamy_mode(a, b): @@ -64,7 +62,7 @@ def _kumaraswamy_pdf(a, b, x): @test_util.run_all_in_graph_and_eager_modes -class KumaraswamyTest(tf.test.TestCase): +class KumaraswamyTest(test_case.TestCase): def testSimpleShapes(self): a = np.random.rand(3) diff --git a/tensorflow_probability/python/distributions/linear_gaussian_ssm_test.py b/tensorflow_probability/python/distributions/linear_gaussian_ssm_test.py index 80e47f596a..d8cef26a2b 100644 --- a/tensorflow_probability/python/distributions/linear_gaussian_ssm_test.py +++ b/tensorflow_probability/python/distributions/linear_gaussian_ssm_test.py @@ -19,11 +19,12 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import 
tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.distributions.linear_gaussian_ssm import _augment_sample_shape from tensorflow_probability.python.distributions.linear_gaussian_ssm import backward_smoothing_update from tensorflow_probability.python.distributions.linear_gaussian_ssm import BackwardPassState @@ -35,13 +36,11 @@ from tensorflow_probability.python.distributions.linear_gaussian_ssm import kalman_transition from tensorflow_probability.python.distributions.linear_gaussian_ssm import KalmanFilterState from tensorflow_probability.python.distributions.linear_gaussian_ssm import linear_gaussian_update - from tensorflow_probability.python.internal import tensorshape_util -from tensorflow_probability.python.internal import test_case as tfp_test_case +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions tfl = tf.linalg @@ -169,25 +168,25 @@ def _build_placeholder(self, ndarray): @test_util.run_all_in_graph_and_eager_modes -class IIDNormalTestStatic32(_IIDNormalTest, tf.test.TestCase): +class IIDNormalTestStatic32(_IIDNormalTest, test_case.TestCase): use_static_shape = True dtype = np.float32 @test_util.run_all_in_graph_and_eager_modes -class IIDNormalTestStatic64(_IIDNormalTest, tf.test.TestCase): +class IIDNormalTestStatic64(_IIDNormalTest, test_case.TestCase): use_static_shape = True dtype = np.float64 @test_util.run_all_in_graph_and_eager_modes -class IIDNormalTestDynamic32(_IIDNormalTest, tf.test.TestCase): +class IIDNormalTestDynamic32(_IIDNormalTest, test_case.TestCase): use_static_shape = False dtype = np.float32 @test_util.run_all_in_graph_and_eager_modes -class SanityChecks(tf.test.TestCase): +class SanityChecks(test_case.TestCase): def test_deterministic_system(self): @@ -366,7 +365,7 @@ def testWhenNumTimestepsIsOne(self): @test_util.run_all_in_graph_and_eager_modes -class BatchTest(tf.test.TestCase): +class BatchTest(test_case.TestCase): """Test that methods broadcast batch dimensions for each parameter.""" def setUp(self): @@ -565,7 +564,7 @@ def testLatentsToObservationsWorksWithBatchShape(self): self.assertAllClose(pushforward_covs_, observation_covs_) -class MissingObservationsTests(tfp_test_case.TestCase): +class MissingObservationsTests(test_case.TestCase): # One test requires derivative with respect to # transition_noise.scale_diag so we allow this to be @@ -804,7 +803,7 @@ def testMaskWhenTimeSeriesHasSampleShape(self): @test_util.run_all_in_graph_and_eager_modes -class KalmanSmootherTest(tf.test.TestCase): +class KalmanSmootherTest(test_case.TestCase): def build_kf(self): # Define a simple model with 3D latents and 2D observations. 
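[Aside, not part of the patch: the renames in this and the neighboring test files all swap `tf.test.TestCase` for TFP's internal `test_case.TestCase` (a `tf.test.TestCase` subclass); where several configurations share their test bodies, as with the `_IIDNormalTest` family above, the recipe is a private mixin plus one concrete leaf class per configuration. A minimal self-contained sketch of that recipe; `_MyDistTest` and its subclasses are invented names, not classes from this patch:

    import numpy as np
    import tensorflow.compat.v2 as tf
    from tensorflow_probability.python.internal import test_case

    class _MyDistTest(object):
      """Shared test bodies; leaf classes pin dtype and shape staticness."""

      def testZerosHaveConfiguredDtype(self):
        x = tf.zeros([3], dtype=self.dtype)
        self.assertEqual(self.dtype, x.dtype)

    class MyDistTestStatic32(_MyDistTest, test_case.TestCase):
      use_static_shape = True
      dtype = np.float32

    class MyDistTestDynamic64(_MyDistTest, test_case.TestCase):
      use_static_shape = False
      dtype = np.float64

    if __name__ == '__main__':
      tf.test.main()

Because the runnable base appears only in the leaf classes, swapping the base never touches the shared test bodies, which is why these hunks are one-line renames.]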
@@ -1280,7 +1279,7 @@ def testPushforwardLatentsStepIsCorrect(self): @test_util.run_all_in_graph_and_eager_modes -class KalmanStepsTestStatic(tf.test.TestCase, _KalmanStepsTest): +class KalmanStepsTestStatic(test_case.TestCase, _KalmanStepsTest): use_static_shape = True @@ -1292,7 +1291,7 @@ def build_tensor(self, tensor): @test_util.run_all_in_graph_and_eager_modes -class KalmanStepsTestDynamic(tf.test.TestCase, _KalmanStepsTest): +class KalmanStepsTestDynamic(test_case.TestCase, _KalmanStepsTest): use_static_shape = False @@ -1348,7 +1347,7 @@ def testTooManyDimsThrowsError(self): @test_util.run_all_in_graph_and_eager_modes -class AugmentSampleShapeTestStatic(tf.test.TestCase, _AugmentSampleShapeTest): +class AugmentSampleShapeTestStatic(test_case.TestCase, _AugmentSampleShapeTest): def assertRaisesError(self, msg): return self.assertRaisesRegexp(Exception, msg) @@ -1365,7 +1364,8 @@ def maybe_evaluate(self, x): @test_util.run_all_in_graph_and_eager_modes -class AugmentSampleShapeTestDynamic(tf.test.TestCase, _AugmentSampleShapeTest): +class AugmentSampleShapeTestDynamic(test_case.TestCase, + _AugmentSampleShapeTest): def assertRaisesError(self, msg): if tf.executing_eagerly(): diff --git a/tensorflow_probability/python/distributions/lkj_test.py b/tensorflow_probability/python/distributions/lkj_test.py index 35dc6d1495..1ee780db48 100644 --- a/tensorflow_probability/python/distributions/lkj_test.py +++ b/tensorflow_probability/python/distributions/lkj_test.py @@ -19,20 +19,18 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.distributions.internal import statistical_testing as st from tensorflow_probability.python.internal import assert_util from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import, - -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import, def _det_ok_mask(x, det_bounds, input_output_cholesky=False): @@ -376,7 +374,7 @@ def testValidateConcentrationAfterMutation(self, dtype): self.evaluate(d.mean()) -class LKJTestGraphOnly(tf.test.TestCase): +class LKJTestGraphOnly(test_case.TestCase): def testDimensionGuardDynamicShape(self): if tf.executing_eagerly(): diff --git a/tensorflow_probability/python/distributions/lognormal_test.py b/tensorflow_probability/python/distributions/lognormal_test.py index 7a7372f8c8..fc85ceed2e 100644 --- a/tensorflow_probability/python/distributions/lognormal_test.py +++ b/tensorflow_probability/python/distributions/lognormal_test.py @@ -19,18 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import 
@test_util.run_all_in_graph_and_eager_modes -class LogNormalTest(tf.test.TestCase): +class LogNormalTest(test_case.TestCase): def setUp(self): self._rng = np.random.RandomState(123) diff --git a/tensorflow_probability/python/distributions/mixture_test.py b/tensorflow_probability/python/distributions/mixture_test.py index 3001dc9d61..6a2fae65ee 100644 --- a/tensorflow_probability/python/distributions/mixture_test.py +++ b/tensorflow_probability/python/distributions/mixture_test.py @@ -21,17 +21,17 @@ import contextlib # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import def _swap_first_last_axes(array): @@ -151,7 +151,7 @@ def create_component(): @test_util.run_all_in_graph_and_eager_modes -class MixtureTest(tf.test.TestCase): +class MixtureTest(test_case.TestCase): use_static_graph = False def testShapes(self): diff --git a/tensorflow_probability/python/distributions/mvn_diag_plus_low_rank_test.py b/tensorflow_probability/python/distributions/mvn_diag_plus_low_rank_test.py index 4946c94b32..675992a12e 100644 --- a/tensorflow_probability/python/distributions/mvn_diag_plus_low_rank_test.py +++ b/tensorflow_probability/python/distributions/mvn_diag_plus_low_rank_test.py @@ -19,18 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalDiagPlusLowRankTest(tf.test.TestCase): +class MultivariateNormalDiagPlusLowRankTest(test_case.TestCase): """Well tested because this is a simple override of the base class.""" def setUp(self): diff --git a/tensorflow_probability/python/distributions/mvn_full_covariance_test.py b/tensorflow_probability/python/distributions/mvn_full_covariance_test.py index 8884b8386b..028619817e 100644 --- a/tensorflow_probability/python/distributions/mvn_full_covariance_test.py +++ b/tensorflow_probability/python/distributions/mvn_full_covariance_test.py @@ -19,20 +19,21 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions rng = np.random.RandomState(42) @test_util.run_all_in_graph_and_eager_modes 
-class MultivariateNormalFullCovarianceTest(tf.test.TestCase): +class MultivariateNormalFullCovarianceTest(test_case.TestCase): def _random_pd_matrix(self, *shape): mat = rng.rand(*shape) diff --git a/tensorflow_probability/python/distributions/mvn_linear_operator_test.py b/tensorflow_probability/python/distributions/mvn_linear_operator_test.py index 9184ff204b..fd9f290149 100644 --- a/tensorflow_probability/python/distributions/mvn_linear_operator_test.py +++ b/tensorflow_probability/python/distributions/mvn_linear_operator_test.py @@ -19,19 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python.internal import test_case -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalLinearOperatorTest(tf.test.TestCase): +class MultivariateNormalLinearOperatorTest(test_case.TestCase): def setUp(self): self.rng = np.random.RandomState(42) diff --git a/tensorflow_probability/python/distributions/mvn_tril_test.py b/tensorflow_probability/python/distributions/mvn_tril_test.py index ade368ed7a..579b55c829 100644 --- a/tensorflow_probability/python/distributions/mvn_tril_test.py +++ b/tensorflow_probability/python/distributions/mvn_tril_test.py @@ -19,23 +19,22 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np from scipy import stats - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalTriLTest(tf.test.TestCase, parameterized.TestCase): +class MultivariateNormalTriLTest(test_case.TestCase, parameterized.TestCase): def setUp(self): self._rng = np.random.RandomState(42) @@ -428,7 +427,7 @@ def __getitem__(self, slices): @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalTriLSlicingTest(tf.test.TestCase, +class MultivariateNormalTriLSlicingTest(test_case.TestCase, parameterized.TestCase): def setUp(self): diff --git a/tensorflow_probability/python/distributions/negative_binomial_test.py b/tensorflow_probability/python/distributions/negative_binomial_test.py index 6e1f11d2a3..42e1b13835 100644 --- a/tensorflow_probability/python/distributions/negative_binomial_test.py +++ b/tensorflow_probability/python/distributions/negative_binomial_test.py @@ -17,23 +17,23 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from 
tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top # In all tests that follow, we use scipy.stats.nbinom, which # represents a Negative Binomial distribution, with success and failure # probabilities flipped. @test_util.run_all_in_graph_and_eager_modes -class NegativeBinomialTest(tf.test.TestCase): +class NegativeBinomialTest(test_case.TestCase): def testNegativeBinomialShape(self): probs = [.1] * 5 @@ -256,7 +256,7 @@ def testParamTensorFromProbs(self): @test_util.run_all_in_graph_and_eager_modes -class NegativeBinomialFromVariableTest(tf.test.TestCase): +class NegativeBinomialFromVariableTest(test_case.TestCase): def testAssertionsProbsMutation(self): x = tf.Variable([0.1, 0.7, 0.0]) diff --git a/tensorflow_probability/python/distributions/normal_conjugate_posteriors_test.py b/tensorflow_probability/python/distributions/normal_conjugate_posteriors_test.py index f893385580..f9f0d71ddb 100644 --- a/tensorflow_probability/python/distributions/normal_conjugate_posteriors_test.py +++ b/tensorflow_probability/python/distributions/normal_conjugate_posteriors_test.py @@ -22,14 +22,14 @@ import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp -tfd = tfp.distributions +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class NormalTest(tf.test.TestCase): +class NormalTest(test_case.TestCase): def testNormalConjugateKnownSigmaPosterior(self): with tf1.Session(): diff --git a/tensorflow_probability/python/distributions/normal_test.py b/tensorflow_probability/python/distributions/normal_test.py index 29640d55df..2a55317f68 100644 --- a/tensorflow_probability/python/distributions/normal_test.py +++ b/tensorflow_probability/python/distributions/normal_test.py @@ -21,17 +21,17 @@ import math # Dependency imports + import numpy as np from scipy import stats as sp_stats - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions @test_util.run_all_in_graph_and_eager_modes @@ -494,7 +494,7 @@ def testIncompatibleArgShapesEager(self): tfd.Normal(loc=tf.zeros([2, 3]), scale=scale, validate_args=True) -class NormalEagerGCTest(tf.test.TestCase): +class NormalEagerGCTest(test_case.TestCase): @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testNormalMeanAndMode(self): diff --git a/tensorflow_probability/python/distributions/poisson_lognormal_test.py b/tensorflow_probability/python/distributions/poisson_lognormal_test.py index e9b4dddbdf..7cb6bcc8f2 100644 --- a/tensorflow_probability/python/distributions/poisson_lognormal_test.py +++ 
b/tensorflow_probability/python/distributions/poisson_lognormal_test.py @@ -20,10 +20,10 @@ import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -125,7 +125,7 @@ def testGradientThroughNonVariableParams(self): @test_util.run_all_in_graph_and_eager_modes class PoissonLogNormalQuadratureCompoundStaticShapeTest( - _PoissonLogNormalQuadratureCompoundTest, tf.test.TestCase): + _PoissonLogNormalQuadratureCompoundTest, test_case.TestCase): @property def static_shape(self): @@ -134,7 +134,7 @@ def static_shape(self): @test_util.run_all_in_graph_and_eager_modes class PoissonLogNormalQuadratureCompoundDynamicShapeTest( - _PoissonLogNormalQuadratureCompoundTest, tf.test.TestCase): + _PoissonLogNormalQuadratureCompoundTest, test_case.TestCase): @property def static_shape(self): diff --git a/tensorflow_probability/python/distributions/relaxed_bernoulli_test.py b/tensorflow_probability/python/distributions/relaxed_bernoulli_test.py index 149af89c65..4a204f6f7b 100644 --- a/tensorflow_probability/python/distributions/relaxed_bernoulli_test.py +++ b/tensorflow_probability/python/distributions/relaxed_bernoulli_test.py @@ -19,20 +19,20 @@ from __future__ import print_function # Dependency imports + import numpy as np import scipy.special import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class RelaxedBernoulliTest(tf.test.TestCase): +class RelaxedBernoulliTest(test_case.TestCase): def testP(self): """Tests that parameter P is set correctly. 
Note that dist.p != dist.pdf.""" diff --git a/tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py b/tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py index 1f3902819f..101f5d49a3 100644 --- a/tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py +++ b/tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py @@ -19,17 +19,17 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy.special import gamma import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_case -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import def make_relaxed_categorical(batch_shape, num_classes, dtype=tf.float32): @@ -83,7 +83,7 @@ def analytical_pdf(x, temperature, logits): @test_util.run_all_in_graph_and_eager_modes -class RelaxedOneHotCategoricalTest(tf.test.TestCase): +class RelaxedOneHotCategoricalTest(test_case.TestCase): def assertRaises(self, error_class, msg): if tf.executing_eagerly(): diff --git a/tensorflow_probability/python/distributions/sample_test.py b/tensorflow_probability/python/distributions/sample_test.py index 806c4f10da..19bd8b94ab 100644 --- a/tensorflow_probability/python/distributions/sample_test.py +++ b/tensorflow_probability/python/distributions/sample_test.py @@ -19,22 +19,20 @@ from __future__ import print_function # Dependency imports + from absl.testing import parameterized import numpy as np - import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top - -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SampleDistributionTest(tf.test.TestCase, parameterized.TestCase): +class SampleDistributionTest(test_case.TestCase, parameterized.TestCase): def test_everything_scalar(self): s = tfd.Sample(tfd.Normal(loc=0, scale=1), 5, validate_args=True) diff --git a/tensorflow_probability/python/distributions/student_t_process_test.py b/tensorflow_probability/python/distributions/student_t_process_test.py index 4100f94bd5..139e883130 100644 --- a/tensorflow_probability/python/distributions/student_t_process_test.py +++ b/tensorflow_probability/python/distributions/student_t_process_test.py @@ -25,6 +25,7 @@ from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python import positive_semidefinite_kernels as psd_kernels from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -235,12 +236,12 @@ def 
testMarginalHasCorrectTypes(self): @test_util.run_all_in_graph_and_eager_modes -class StudentTProcessStaticTest(_StudentTProcessTest, tf.test.TestCase): +class StudentTProcessStaticTest(_StudentTProcessTest, test_case.TestCase): is_static = True @test_util.run_all_in_graph_and_eager_modes -class StudentTProcessDynamicTest(_StudentTProcessTest, tf.test.TestCase): +class StudentTProcessDynamicTest(_StudentTProcessTest, test_case.TestCase): is_static = False diff --git a/tensorflow_probability/python/distributions/transformed_distribution_test.py b/tensorflow_probability/python/distributions/transformed_distribution_test.py index 39ea0f5823..35f4fe92e7 100644 --- a/tensorflow_probability/python/distributions/transformed_distribution_test.py +++ b/tensorflow_probability/python/distributions/transformed_distribution_test.py @@ -19,19 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import stats - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import class DummyMatrixTransform(tfb.Bijector): @@ -91,7 +91,7 @@ def _gather_loc(self, z): @test_util.run_all_in_graph_and_eager_modes -class TransformedDistributionTest(tf.test.TestCase): +class TransformedDistributionTest(test_case.TestCase): def _cls(self): return tfd.TransformedDistribution @@ -331,7 +331,7 @@ def testScalarBatchScalarEventIdentityScale(self): self.assertAllClose(base_log_prob - ildj, log_prob_, rtol=1e-6, atol=0.) 
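[Aside, not part of the patch: the import hunk at the top of this file shows the patch's most repeated transformation, condensed here for reference (illustrative, not a literal hunk):

    # Before: alias submodules off the top-level package.
    #   import tensorflow_probability as tfp
    #   tfb = tfp.bijectors
    #   tfd = tfp.distributions
    # After: import the submodules directly; files that still use `tfp`
    # itself (as this one does) keep the top-level import as well.
    from tensorflow_probability.python import bijectors as tfb
    from tensorflow_probability.python import distributions as tfd

Either spelling binds `tfb`/`tfd` to the same module objects; the direct form just drops the module-level aliases.]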
-class ScalarToMultiTest(tf.test.TestCase): +class ScalarToMultiTest(test_case.TestCase): def _cls(self): return tfd.TransformedDistribution diff --git a/tensorflow_probability/python/distributions/uniform_test.py b/tensorflow_probability/python/distributions/uniform_test.py index 40dc3fbafa..9b9449938a 100644 --- a/tensorflow_probability/python/distributions/uniform_test.py +++ b/tensorflow_probability/python/distributions/uniform_test.py @@ -18,22 +18,21 @@ from __future__ import division from __future__ import print_function - # Dependency imports + import numpy as np from scipy import stats as sp_stats - import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class UniformTest(tf.test.TestCase): +class UniformTest(test_case.TestCase): def testUniformRange(self): a = 3.0 diff --git a/tensorflow_probability/python/distributions/variational_gaussian_process_test.py b/tensorflow_probability/python/distributions/variational_gaussian_process_test.py index 8a73e48314..388ddc605c 100644 --- a/tensorflow_probability/python/distributions/variational_gaussian_process_test.py +++ b/tensorflow_probability/python/distributions/variational_gaussian_process_test.py @@ -26,8 +26,8 @@ from tensorflow_probability import distributions as tfd from tensorflow_probability import positive_semidefinite_kernels as psd_kernels - from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -258,13 +258,13 @@ def testVariationalLossShapes(self): @test_util.run_all_in_graph_and_eager_modes class VariationalGaussianProcessStaticTest( - _VariationalGaussianProcessTest, tf.test.TestCase): + _VariationalGaussianProcessTest, test_case.TestCase): is_static = True @test_util.run_all_in_graph_and_eager_modes class VariationalGaussianProcessDynamicTest( - _VariationalGaussianProcessTest, tf.test.TestCase): + _VariationalGaussianProcessTest, test_case.TestCase): is_static = False diff --git a/tensorflow_probability/python/distributions/vector_diffeomixture_test.py b/tensorflow_probability/python/distributions/vector_diffeomixture_test.py index 34a1afc987..e772f68644 100644 --- a/tensorflow_probability/python/distributions/vector_diffeomixture_test.py +++ b/tensorflow_probability/python/distributions/vector_diffeomixture_test.py @@ -19,13 +19,13 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top rng = np.random.RandomState(0) @@ -33,7 +33,7 @@ @test_util.run_all_in_graph_and_eager_modes class 
VectorDiffeomixtureTest(tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): """Tests the VectorDiffeomixture distribution.""" def testSampleProbConsistentBroadcastMixNoBatch(self): diff --git a/tensorflow_probability/python/distributions/vector_exponential_diag_test.py b/tensorflow_probability/python/distributions/vector_exponential_diag_test.py index 17894d1527..0ebe14319a 100644 --- a/tensorflow_probability/python/distributions/vector_exponential_diag_test.py +++ b/tensorflow_probability/python/distributions/vector_exponential_diag_test.py @@ -19,18 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class VectorExponentialDiagTest(tf.test.TestCase): +class VectorExponentialDiagTest(test_case.TestCase): """Well tested because this is a simple override of the base class.""" def setUp(self): diff --git a/tensorflow_probability/python/distributions/vector_laplace_diag_test.py b/tensorflow_probability/python/distributions/vector_laplace_diag_test.py index 2c59cbf664..b1a002397f 100644 --- a/tensorflow_probability/python/distributions/vector_laplace_diag_test.py +++ b/tensorflow_probability/python/distributions/vector_laplace_diag_test.py @@ -19,18 +19,19 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class VectorLaplaceDiagTest(tf.test.TestCase): +class VectorLaplaceDiagTest(test_case.TestCase): """Well tested because this is a simple override of the base class.""" def setUp(self): diff --git a/tensorflow_probability/python/distributions/vector_sinh_arcsinh_diag_test.py b/tensorflow_probability/python/distributions/vector_sinh_arcsinh_diag_test.py index dae9cd7c25..1d565f321f 100644 --- a/tensorflow_probability/python/distributions/vector_sinh_arcsinh_diag_test.py +++ b/tensorflow_probability/python/distributions/vector_sinh_arcsinh_diag_test.py @@ -19,19 +19,21 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: 
disable=g-direct-tensorflow-import,g-import-not-at-top + rng = np.random.RandomState(123) @test_util.run_all_in_graph_and_eager_modes class VectorSinhArcsinhDiagTest(tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): def test_default_is_same_as_normal(self): d = 10 diff --git a/tensorflow_probability/python/distributions/vector_student_t_test.py b/tensorflow_probability/python/distributions/vector_student_t_test.py index 1e05f92a4b..5060e523b6 100644 --- a/tensorflow_probability/python/distributions/vector_student_t_test.py +++ b/tensorflow_probability/python/distributions/vector_student_t_test.py @@ -26,7 +26,7 @@ import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.vector_student_t import _VectorStudentT - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -72,7 +72,7 @@ def prob(self, x): @test_util.run_all_in_graph_and_eager_modes -class VectorStudentTTest(tf.test.TestCase): +class VectorStudentTTest(test_case.TestCase): def setUp(self): super(VectorStudentTTest, self).setUp() diff --git a/tensorflow_probability/python/distributions/von_mises_fisher_test.py b/tensorflow_probability/python/distributions/von_mises_fisher_test.py index 1c4c46b4ab..448a842b4b 100644 --- a/tensorflow_probability/python/distributions/von_mises_fisher_test.py +++ b/tensorflow_probability/python/distributions/von_mises_fisher_test.py @@ -28,13 +28,14 @@ from tensorflow_probability.python.distributions.von_mises_fisher import _bessel_ive from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class VonMisesFisherTest(tfp_test_util.VectorDistributionTestHelpers, - tf.test.TestCase): + test_case.TestCase): def testBesselIve(self): self.assertRaises(ValueError, lambda: _bessel_ive(2.0, 1.0)) diff --git a/tensorflow_probability/python/distributions/wishart_test.py b/tensorflow_probability/python/distributions/wishart_test.py index b96b0baddc..bd0ce88b29 100644 --- a/tensorflow_probability/python/distributions/wishart_test.py +++ b/tensorflow_probability/python/distributions/wishart_test.py @@ -19,17 +19,18 @@ from __future__ import print_function # Dependency imports + import numpy as np from scipy import linalg import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import tensorshape_util +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import def make_pd(start, n): @@ -53,7 +54,7 @@ def wishart_var(df, x): @test_util.run_all_in_graph_and_eager_modes -class WishartTest(tf.test.TestCase): +class WishartTest(test_case.TestCase): def testEntropy(self): scale = make_pd(1., 2) diff --git 
a/tensorflow_probability/python/edward2/__init__.py b/tensorflow_probability/python/edward2/__init__.py index 07ef70046b..c6cd9fc8f4 100644 --- a/tensorflow_probability/python/edward2/__init__.py +++ b/tensorflow_probability/python/edward2/__init__.py @@ -28,6 +28,8 @@ from __future__ import division from __future__ import print_function +import warnings + # pylint: disable=wildcard-import from tensorflow_probability.python.experimental.edward2.generated_random_variables import * from tensorflow_probability.python.experimental.edward2.generated_random_variables import as_random_variable @@ -43,6 +45,11 @@ from tensorflow.python.util.all_util import remove_undocumented +warnings.warn( + "tfp.edward2 module is deprecated and will be removed on " + "2019-12-01. Use https://github.com/google/edward2 library instead.", + stacklevel=5) + _allowed_symbols = list(rv_dict.keys()) + [ "RandomVariable", "as_random_variable", diff --git a/tensorflow_probability/python/experimental/auto_batching/BUILD b/tensorflow_probability/python/experimental/auto_batching/BUILD index e240cfe8c1..25b8f11b14 100644 --- a/tensorflow_probability/python/experimental/auto_batching/BUILD +++ b/tensorflow_probability/python/experimental/auto_batching/BUILD @@ -99,7 +99,9 @@ py_library( "virtual_machine.py", ], srcs_version = "PY2AND3", - deps = [":instructions"], + deps = [ + ":instructions", + ], ) py_library( @@ -108,7 +110,9 @@ py_library( "liveness.py", ], srcs_version = "PY2AND3", - deps = [":instructions"], + deps = [ + ":instructions", + ], ) py_library( @@ -163,7 +167,9 @@ py_library( "dsl.py", ], srcs_version = "PY2AND3", - deps = [":instructions"], + deps = [ + ":instructions", + ], ) py_library( @@ -210,11 +216,13 @@ py_library( py_library( name = "tfp_xla_test_case", + testonly = 1, srcs = ["tfp_xla_test_case.py"], srcs_version = "PY2AND3", deps = [ ":xla", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -235,6 +243,7 @@ py_test( ":instructions", ":test_programs", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -250,6 +259,7 @@ py_test( # hypothesis dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -265,6 +275,7 @@ py_test( # hypothesis dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -280,6 +291,7 @@ py_test( ":test_programs", ":virtual_machine", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -293,6 +305,7 @@ py_test( ":instructions", ":test_programs", # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -307,6 +320,7 @@ py_test( ":stackless", ":test_programs", ":tf_backend", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -318,6 +332,7 @@ py_test( deps = [ ":stack_optimization", ":test_programs", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -338,6 +353,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -356,6 +372,7 @@ py_test( ":virtual_machine", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -374,5 +391,6 @@ py_test( ":virtual_machine", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/experimental/auto_batching/allocation_strategy_test.py 
b/tensorflow_probability/python/experimental/auto_batching/allocation_strategy_test.py index 08c74248f8..a92fc24226 100644 --- a/tensorflow_probability/python/experimental/auto_batching/allocation_strategy_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/allocation_strategy_test.py @@ -19,11 +19,12 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import instructions as inst from tensorflow_probability.python.experimental.auto_batching import test_programs +from tensorflow_probability.python.internal import test_case def strip_pop_ops(program): @@ -42,7 +43,7 @@ def walk_graph(graph): walk_graph(func.graph) -class AllocationStrategyTest(tf.test.TestCase): +class AllocationStrategyTest(test_case.TestCase): def assertAllocates(self, expected, prog): allocated = allocation_strategy.optimize(prog) diff --git a/tensorflow_probability/python/experimental/auto_batching/dsl_test.py b/tensorflow_probability/python/experimental/auto_batching/dsl_test.py index 3c4130bf98..03c45a2a9d 100644 --- a/tensorflow_probability/python/experimental/auto_batching/dsl_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/dsl_test.py @@ -19,7 +19,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import dsl @@ -29,10 +29,11 @@ from tensorflow_probability.python.experimental.auto_batching import tf_backend from tensorflow_probability.python.experimental.auto_batching import type_inference from tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import TF_BACKEND = tf_backend.TensorFlowBackend() + NP_BACKEND = numpy_backend.NumpyBackend() @@ -115,7 +116,7 @@ def my_type(_): @test_util.run_all_in_graph_and_eager_modes -class AutoBatchingTest(tf.test.TestCase): +class AutoBatchingTest(test_case.TestCase): def testAutoBatchingFibonacciNumpy(self): for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]): diff --git a/tensorflow_probability/python/experimental/auto_batching/frontend.py b/tensorflow_probability/python/experimental/auto_batching/frontend.py index 811bc0c8f1..a45def1998 100644 --- a/tensorflow_probability/python/experimental/auto_batching/frontend.py +++ b/tensorflow_probability/python/experimental/auto_batching/frontend.py @@ -23,7 +23,7 @@ import gast import six -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import dsl diff --git a/tensorflow_probability/python/experimental/auto_batching/frontend_test.py b/tensorflow_probability/python/experimental/auto_batching/frontend_test.py index 98f9054f7c..8d327a0f92 100644 --- a/tensorflow_probability/python/experimental/auto_batching/frontend_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/frontend_test.py @@ -24,16 +24,18 @@ from absl import logging import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 
+import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import frontend from tensorflow_probability.python.experimental.auto_batching import instructions from tensorflow_probability.python.experimental.auto_batching import numpy_backend from tensorflow_probability.python.experimental.auto_batching import tf_backend - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import TF_BACKEND = tf_backend.TensorFlowBackend() + NP_BACKEND = numpy_backend.NumpyBackend() @@ -48,7 +50,7 @@ def fibonacci(n): @test_util.run_all_in_graph_and_eager_modes -class AutoGraphFrontendTest(tf.test.TestCase): +class AutoGraphFrontendTest(test_case.TestCase): def testFibonacci(self): self.assertEqual(1, fibonacci(0)) @@ -587,7 +589,7 @@ def _build_tensor(self, ndarray): shape = ndarray.shape else: shape = [None] + list(ndarray.shape[1:]) - return tf.compat.v1.placeholder_with_default(input=ndarray, shape=shape) + return tf1.placeholder_with_default(input=ndarray, shape=shape) def _check_batch_size(self, tensor, expected): if self.use_static_batch_size or tf.executing_eagerly(): @@ -671,12 +673,12 @@ def an_autobatch_function(x): @test_util.run_all_in_graph_and_eager_modes -class TestTFStaticBatchSize(tf.test.TestCase, _TestHidingTFBatchSize): +class TestTFStaticBatchSize(test_case.TestCase, _TestHidingTFBatchSize): use_static_batch_size = True @test_util.run_all_in_graph_and_eager_modes -class TestTFDynamicBatchSize(tf.test.TestCase, _TestHidingTFBatchSize): +class TestTFDynamicBatchSize(test_case.TestCase, _TestHidingTFBatchSize): use_static_batch_size = False if __name__ == '__main__': diff --git a/tensorflow_probability/python/experimental/auto_batching/instructions.py b/tensorflow_probability/python/experimental/auto_batching/instructions.py index 04f1b859b6..f0d0fb769a 100644 --- a/tensorflow_probability/python/experimental/auto_batching/instructions.py +++ b/tensorflow_probability/python/experimental/auto_batching/instructions.py @@ -27,7 +27,7 @@ from absl import logging import numpy as np import six -import tensorflow as tf +import tensorflow.compat.v2 as tf __all__ = [ diff --git a/tensorflow_probability/python/experimental/auto_batching/instructions_test.py b/tensorflow_probability/python/experimental/auto_batching/instructions_test.py index 105ef86e37..beb5147356 100644 --- a/tensorflow_probability/python/experimental/auto_batching/instructions_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/instructions_test.py @@ -19,13 +19,14 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import instructions from tensorflow_probability.python.experimental.auto_batching import test_programs +from tensorflow_probability.python.internal import test_case -class InstructionsTest(tf.test.TestCase): +class InstructionsTest(test_case.TestCase): def testConstant(self): # This program always returns 2. 
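The hunks in this and the neighboring test files all apply one mechanical migration: `import tensorflow as tf` becomes the explicit `tensorflow.compat.v2` alias, and the test base class moves from `tf.test.TestCase` to TFP's internal `test_case.TestCase`. A minimal sketch of the resulting test layout (the class and test names here are hypothetical, and it assumes `test_case.TestCase` preserves the `tf.test.TestCase` interface, including `evaluate` and `assertAllEqual`):

    import tensorflow.compat.v2 as tf
    from tensorflow_probability.python.internal import test_case


    class ExampleTest(test_case.TestCase):

      def testConstantProgram(self):
        # Mirrors the "always returns 2" check above; self.evaluate()
        # resolves the value in both graph and eager modes.
        self.assertAllEqual(2, self.evaluate(tf.constant(2)))


    if __name__ == '__main__':
      tf.test.main()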
@@ -220,7 +221,7 @@ def main(n1): """.strip() -class PrettyPrintingTest(tf.test.TestCase): +class PrettyPrintingTest(test_case.TestCase): def verify_program_pretty_print(self, expected_text, program, **kwargs): actual_text = str(program.__str__(**kwargs)) diff --git a/tensorflow_probability/python/experimental/auto_batching/lowering_test.py b/tensorflow_probability/python/experimental/auto_batching/lowering_test.py index 8490a0bad0..ea966ddca2 100644 --- a/tensorflow_probability/python/experimental/auto_batching/lowering_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/lowering_test.py @@ -19,13 +19,14 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import lowering from tensorflow_probability.python.experimental.auto_batching import numpy_backend from tensorflow_probability.python.experimental.auto_batching import test_programs from tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm +from tensorflow_probability.python.internal import test_case NP_BACKEND = numpy_backend.NumpyBackend() @@ -49,7 +50,7 @@ def _is_even_lowered_execute(inputs, backend): max_stack_depth=int(max(inputs)) + 3, backend=backend)) -class LoweringTest(tf.test.TestCase): +class LoweringTest(test_case.TestCase): def testLoweringFibonacciNumpy(self): self.assertEqual([8], _fibonacci_lowered_execute([5], NP_BACKEND)) diff --git a/tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py b/tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py index f71b867bee..311c38edcb 100644 --- a/tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/numpy_backend_test.py @@ -23,11 +23,12 @@ from hypothesis import strategies as hps from hypothesis.extra import numpy as hpnp import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test from tensorflow_probability.python.experimental.auto_batching import instructions as inst from tensorflow_probability.python.experimental.auto_batching import numpy_backend +from tensorflow_probability.python.internal import test_case NP_BACKEND = numpy_backend.NumpyBackend() @@ -43,7 +44,7 @@ def var_init(max_stack_depth, initial_value): # A TF test case for self.assertAllEqual, but doesn't use TF so doesn't care # about Eager vs Graph mode. 
-class NumpyVariableTest(tf.test.TestCase, backend_test.VariableTestCase): +class NumpyVariableTest(test_case.TestCase, backend_test.VariableTestCase): def testNumpySmoke(self): """Test the property on specific example, without relying on Hypothesis.""" diff --git a/tensorflow_probability/python/experimental/auto_batching/stack_optimization_test.py b/tensorflow_probability/python/experimental/auto_batching/stack_optimization_test.py index b54cefba09..37b2a8c170 100644 --- a/tensorflow_probability/python/experimental/auto_batching/stack_optimization_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/stack_optimization_test.py @@ -17,13 +17,14 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import stack_optimization as stack from tensorflow_probability.python.experimental.auto_batching import test_programs +from tensorflow_probability.python.internal import test_case -class StackOptimizationTest(tf.test.TestCase): +class StackOptimizationTest(test_case.TestCase): def testPopPushFusionPrettyPrint(self): # Testing two things: That pop-push fusion does the expected thing, and that diff --git a/tensorflow_probability/python/experimental/auto_batching/stackless_test.py b/tensorflow_probability/python/experimental/auto_batching/stackless_test.py index 14227af6c0..60c7e64012 100644 --- a/tensorflow_probability/python/experimental/auto_batching/stackless_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/stackless_test.py @@ -20,13 +20,15 @@ # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import numpy_backend from tensorflow_probability.python.experimental.auto_batching import stackless from tensorflow_probability.python.experimental.auto_batching import test_programs from tensorflow_probability.python.experimental.auto_batching import tf_backend +from tensorflow_probability.python.internal import test_case TF_BACKEND = tf_backend.TensorFlowBackend() NP_BACKEND = numpy_backend.NumpyBackend() @@ -45,7 +47,7 @@ def _is_even_stackless_execute(inputs, backend): # Stackless autobatching doesn't work in TF graph mode. 
-class StacklessTest(tf.test.TestCase): +class StacklessTest(test_case.TestCase): def testStacklessFibonacciNumpy(self): self.assertEqual([8], list(_fibonacci_stackless_execute([5], NP_BACKEND))) @@ -80,5 +82,5 @@ def testStacklessIsEvenTF(self): self.evaluate(_is_even_stackless_execute([5, 6, 8, 9, 0], TF_BACKEND))) if __name__ == '__main__': - tf.compat.v1.enable_eager_execution() + tf1.enable_eager_execution() tf.test.main() diff --git a/tensorflow_probability/python/experimental/auto_batching/tf_backend.py b/tensorflow_probability/python/experimental/auto_batching/tf_backend.py index 5512dd582c..b59bb26348 100644 --- a/tensorflow_probability/python/experimental/auto_batching/tf_backend.py +++ b/tensorflow_probability/python/experimental/auto_batching/tf_backend.py @@ -26,7 +26,8 @@ # Dependency imports import six -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import instructions from tensorflow_probability.python.experimental.auto_batching import xla @@ -46,7 +47,7 @@ def _control_flow_v2(): def _generalized_where(mask, value, old_value): - """Version of tf.compat.v1.where that broadcasts `value` to `old_value`.""" + """Version of tf1.where that broadcasts `value` to `old_value`.""" mask = tf.convert_to_tensor(value=mask, name='mask') mask.shape.assert_has_rank(1) value = tf.convert_to_tensor(value=value, name='value', dtype=old_value.dtype) @@ -58,7 +59,7 @@ def _generalized_where(mask, value, old_value): value |= tf.zeros_like(old_value) else: value += tf.zeros_like(old_value) - new_value = tf.compat.v1.where(mask, value, old_value, name='new_value') + new_value = tf1.where(mask, value, old_value, name='new_value') # TODO(b/78655271): Do we need 'new_val.set_shape(old_value.shape)'? return new_value @@ -139,12 +140,12 @@ def pop(self, mask, name=None): by `mask`. read: The batch of values at the newly-current stack frame. """ - with tf.compat.v2.name_scope(name or 'Stack.pop'): + with tf.name_scope(name or 'Stack.pop'): mask = tf.convert_to_tensor(value=mask, name='mask') new_stack_index = self.stack_index - tf.cast(mask, self.stack_index.dtype) if self._safety_checks(): with tf.control_dependencies( - [tf.compat.v1.assert_greater_equal( + [tf1.assert_greater_equal( new_stack_index, tf.constant(0, new_stack_index.dtype))]): new_stack_index = tf.identity(new_stack_index) new_stack_index.set_shape(self.stack_index.shape) @@ -181,7 +182,7 @@ def push(self, value, mask, name=None): asserted_value: A assertion-bound snapshot of the input `value`, assertions used to catch stack overflows. """ - with tf.compat.v2.name_scope(name or 'Stack.push'): + with tf.name_scope(name or 'Stack.push'): value = tf.convert_to_tensor(value=value, name='value') mask = tf.convert_to_tensor(value=mask, name='mask') # self.stack: [max_stack_depth * batch_size, ...] 
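The tf_backend.py hunks follow one convention: everything spelled `tf.` resolves to the v2 API (so `tf.compat.v2.name_scope` becomes plain `tf.name_scope`), while v1-only ops keep an explicit `tf1` alias. `tf1.where` is the main such op here: it retains the v1 semantics of selecting whole batch members from same-shaped operands given a 1-D mask, which is why `_generalized_where` first broadcasts `value` up to the shape of `old_value`. A standalone sketch of that pattern (not a file in this change):

    import tensorflow.compat.v1 as tf1
    import tensorflow.compat.v2 as tf


    def masked_update(mask, value, old_value):
      # v2-style name_scope takes only a name argument.
      with tf.name_scope('masked_update'):
        mask = tf.convert_to_tensor(value=mask, name='mask')
        value = tf.convert_to_tensor(value=value, name='value',
                                     dtype=old_value.dtype)
        # Broadcast `value` to the shape of `old_value`, then let the
        # 1-D `mask` pick each batch member from one operand or the other.
        value += tf.zeros_like(old_value)
        return tf1.where(mask, value, old_value)

For example, `masked_update([True, False], 5., tf.zeros([2, 3]))` replaces only the first batch row with fives and leaves the second row untouched.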
@@ -206,7 +207,7 @@ def push(self, value, mask, name=None): on_value=True, off_value=False, dtype=tf.bool) - new_stack = tf.compat.v1.where( + new_stack = tf1.where( tf.reshape(update_stack_mask, [-1]), tf.reshape(tiled_value, tf.shape(input=self.stack)), self.stack) new_stack.set_shape(self.stack.shape) @@ -214,7 +215,7 @@ def push(self, value, mask, name=None): new_stack_index.set_shape(self.stack_index.shape) if self._safety_checks(): with tf.control_dependencies( - [tf.compat.v1.assert_less( + [tf1.assert_less( new_stack_index, tf.cast( max_stack_depth_tensor, new_stack_index.dtype))]): value = tf.identity(value) @@ -244,7 +245,7 @@ def _create_stack(max_stack_depth, value, safety_checks=True, name=None): Returns: stack: An initialized Stack object. """ - with tf.compat.v2.name_scope(name or 'Stack.initialize'): + with tf.name_scope(name or 'Stack.initialize'): value = tf.convert_to_tensor(value=value, name='value') batch_size = _get_leftmost_dim_size(value) # Home the stack index in the same memory space as the value. The @@ -283,7 +284,7 @@ def _name(self): def read(self, name=None): """Returns the batch of top values.""" - with tf.compat.v2.name_scope(name or '{}.read'.format(self._name())): + with tf.name_scope(name or '{}.read'.format(self._name())): return tf.identity(self.current) def update(self, value, mask, name=None): @@ -300,7 +301,7 @@ def update(self, value, mask, name=None): Returns: var: Updated variable. Does not mutate `self`. """ - with tf.compat.v2.name_scope(name or '{}.update'.format(self._name())): + with tf.name_scope(name or '{}.update'.format(self._name())): new_value = _generalized_where(mask, value, self.current) return type(self)(new_value, self.stack) @@ -318,7 +319,7 @@ def push(self, mask, name=None): Returns: var: Updated variable. Does not mutate `self`. """ - with tf.compat.v2.name_scope(name or '{}.push'.format(self._name())): + with tf.name_scope(name or '{}.push'.format(self._name())): new_stack, asserted_value = self.stack.push(self.current, mask) return type(self)(asserted_value, new_stack) @@ -333,10 +334,10 @@ def pop(self, mask, name=None): Returns: var: Updated variable. Does not mutate `self`. """ - with tf.compat.v2.name_scope(name or '{}.pop'.format(self._name())): + with tf.name_scope(name or '{}.pop'.format(self._name())): mask = tf.convert_to_tensor(value=mask, name='mask') new_stack, stack_value = self.stack.pop(mask) - new_value = tf.compat.v1.where( + new_value = tf1.where( mask, stack_value, self.current, name='new_value') return type(self)(new_value, new_stack) @@ -409,7 +410,7 @@ def run_on_dummies(self, primitive_callable, input_types): outputs: pattern of backend-specific objects whose types may be analyzed by the caller with `type_of`. """ - with tf.compat.v2.name_scope('VM.run_on_dummies'): + with tf.name_scope('VM.run_on_dummies'): # We cannot use a temporary graph in eager mode because user code may # close over eager tensors, causing `RuntimeError: Attempting to capture # an EagerTensor without building a function.` @@ -474,7 +475,7 @@ def assert_matching_dtype(self, expected_dtype, value, message=''): Raises: ValueError: If dtype does not match. 
""" - with tf.compat.v2.name_scope('VM.assert_matching_dtype'): + with tf.name_scope('VM.assert_matching_dtype'): value = tf.convert_to_tensor( value=value, name='value', dtype=expected_dtype) if value.dtype.base_dtype.as_numpy_dtype != expected_dtype: @@ -483,7 +484,7 @@ def assert_matching_dtype(self, expected_dtype, value, message=''): def batch_size(self, value, name=None): """Returns the first (batch) dimension of `value`.""" - with tf.compat.v2.name_scope(name or 'VM.batch_size'): + with tf.name_scope(name or 'VM.batch_size'): value = tf.convert_to_tensor(value=value, name='value') return _get_leftmost_dim_size(value) @@ -506,7 +507,7 @@ def fill(self, value, size, dtype, shape, name=None): Returns: result: `Tensor` of `dtype` `value`s with shape `[size, *shape]` """ - with tf.compat.v2.name_scope(name or 'VM.fill'): + with tf.name_scope(name or 'VM.fill'): size = tf.convert_to_tensor(value=size, name='size') shape = tf.convert_to_tensor(value=shape, name='shape', dtype=size.dtype) return tf.fill(tf.concat([[size], shape], axis=0), @@ -535,7 +536,7 @@ def create_variable(self, name, alloc, type_, max_stack_depth, batch_size): name = 'Variable' if name is None else 'VM.var_{}'.format(name) dtype, event_shape = type_ - with tf.compat.v2.name_scope('{}.initialize'.format(name)): + with tf.name_scope('{}.initialize'.format(name)): if (alloc is instructions.VariableAllocation.REGISTER and tf.executing_eagerly()): # Don't need to construct the empty value in Eager mode, because there @@ -563,7 +564,7 @@ def _name(self): def full_mask(self, size, name=None): """Returns an all-True mask `Tensor` with shape `[size]`.""" - with tf.compat.v2.name_scope(name or 'VM.full_mask'): + with tf.name_scope(name or 'VM.full_mask'): size = tf.convert_to_tensor(value=size, name='size') return tf.ones(size, dtype=tf.bool) @@ -585,8 +586,8 @@ def broadcast_to_shape_of(self, val, target, name=None): returned value will be the shape of `target`. """ # TODO(b/78594182): This is a compatibility shim, required because - # `tf.compat.v1.where` does not support broadcasting of its value operands. - with tf.compat.v2.name_scope(name or 'VM.broadcast_to_shape_of'): + # `tf1.where` does not support broadcasting of its value operands. + with tf.name_scope(name or 'VM.broadcast_to_shape_of'): dtype = getattr(target, 'dtype', getattr(val, 'dtype', None)) target = tf.convert_to_tensor(value=target, name='target', dtype=dtype) val = tf.convert_to_tensor(value=val, name='val', dtype=target.dtype) @@ -608,7 +609,7 @@ def cond(self, pred, true_fn, false_fn, name=None): Returns: state: Output state, matching nest structure of input argument `state`. """ - with tf.compat.v2.name_scope(name or 'VM.cond'): + with tf.name_scope(name or 'VM.cond'): with _control_flow_v2(): return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn) @@ -632,10 +633,10 @@ def prepare_for_cond(self, state): # Eager, lazy initialization for register variables means that the state # may not always be correct to convert to a Tensor. return state - with tf.compat.v2.name_scope('VM.prepare_for_cond'): + with tf.name_scope('VM.prepare_for_cond'): state_flat = [tf.convert_to_tensor(value=x) - for x in tf.compat.v2.nest.flatten(state)] - return tf.compat.v2.nest.pack_sequence_as(state, state_flat) + for x in tf.nest.flatten(state)] + return tf.nest.pack_sequence_as(state, state_flat) def where(self, condition, x, y, name=None): """Implements a where selector for the TF backend. 
@@ -656,21 +657,21 @@ def where(self, condition, x, y, name=None): masked: A broadcast-shaped `Tensor` where elements corresponding to `True` values of `condition` come from `x`, and others come from `y`. """ - with tf.compat.v2.name_scope(name or 'VM.where'): + with tf.name_scope(name or 'VM.where'): condition = tf.convert_to_tensor(value=condition, name='condition') dtype = getattr(x, 'dtype', getattr(y, 'dtype', None)) x = tf.convert_to_tensor(value=x, name='x', dtype=dtype) y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype) - return tf.compat.v1.where(condition, x, y) + return tf1.where(condition, x, y) def reduce_min(self, t, name=None): """Implements reduce_min for TF backend.""" - with tf.compat.v2.name_scope('VM.reduce_min'): + with tf.name_scope('VM.reduce_min'): return tf.reduce_min(input_tensor=t, name=name) def while_loop(self, cond, body, loop_vars, name=None): """Implements while loops for TF backend.""" - with tf.compat.v2.name_scope('VM.while_loop'): + with tf.name_scope('VM.while_loop'): if tf.executing_eagerly(): # The reg. variable optimization (see create_variable) may change loop # structure across iterations, which now triggers an exception for eager @@ -690,22 +691,22 @@ def while_loop(self, cond, body, loop_vars, name=None): def switch_case(self, branch_selector, branch_callables, name=None): """Implements a switch (branch_selector) { case ... } construct.""" - with tf.compat.v2.name_scope('VM.switch_case'): + with tf.name_scope('VM.switch_case'): with _control_flow_v2(): return tf.switch_case(branch_selector, branch_callables, name=name) def equal(self, t1, t2, name=None): """Implements equality comparison for TF backend.""" - with tf.compat.v2.name_scope('VM.equal'): + with tf.name_scope('VM.equal'): return tf.equal(t1, t2, name=name) def not_equal(self, t1, t2, name=None): """Implements inequality comparison for TF backend.""" - with tf.compat.v2.name_scope('VM.not_equal'): + with tf.name_scope('VM.not_equal'): return tf.not_equal(t1, t2, name=name) def any(self, t, name=None): - with tf.compat.v2.name_scope(name or 'VM.any'): + with tf.name_scope(name or 'VM.any'): return tf.reduce_any(input_tensor=t) def wrap_straightline_callable(self, f): @@ -741,7 +742,7 @@ def _init_f(env_dict, *args): def _get_leftmost_dim_size(x, name=None): """Returns the size of the left most dimension, statically if possible.""" - with tf.compat.v2.name_scope(name or 'get_leftmost_dim_size'): + with tf.name_scope(name or 'get_leftmost_dim_size'): x = tf.convert_to_tensor(value=x, name='x') if x.shape.ndims is None: # If tf.shape(x) is scalar, the [:1] will produce the empty list, whose diff --git a/tensorflow_probability/python/experimental/auto_batching/tf_backend_test.py b/tensorflow_probability/python/experimental/auto_batching/tf_backend_test.py index 5497b15412..800679342a 100644 --- a/tensorflow_probability/python/experimental/auto_batching/tf_backend_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/tf_backend_test.py @@ -25,15 +25,16 @@ from hypothesis.extra import numpy as hpnp import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test from tensorflow_probability.python.experimental.auto_batching import instructions as inst from tensorflow_probability.python.experimental.auto_batching import tf_backend - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: 
disable=g-direct-tensorflow-import # TODO(b/127689162): Restore testing complex dtypes. + # TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.complex64, np.bool] TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.bool] TF_BACKEND = tf_backend.TensorFlowBackend() @@ -49,7 +50,7 @@ def var_init(max_stack_depth, initial_value): @test_util.run_all_in_graph_and_eager_modes -class TFVariableTest(tf.test.TestCase, backend_test.VariableTestCase): +class TFVariableTest(test_case.TestCase, backend_test.VariableTestCase): def testTFSmoke(self): """Test the property on specific example, without relying on Hypothesis.""" diff --git a/tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py b/tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py index 45f3ee3eaf..4738801b30 100644 --- a/tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py +++ b/tensorflow_probability/python/experimental/auto_batching/tfp_xla_test_case.py @@ -22,18 +22,20 @@ # Dependency imports from absl import flags -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import xla +from tensorflow_probability.python.internal import test_case from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import - flags.DEFINE_string('test_device', None, + 'TensorFlow device on which to place operators under test') flags.DEFINE_string('tf_xla_flags', None, 'Value to set the TF_XLA_FLAGS environment variable to') FLAGS = flags.FLAGS -class TFPXLATestCase(tf.test.TestCase): +class TFPXLATestCase(test_case.TestCase): """TFP+XLA test harness.""" def __init__(self, method_name='runTest'): @@ -54,5 +56,5 @@ def tearDown(self): def wrap_fn(self, f): return xla.compile_nested_output( - f, (tf.compat.v1.tpu.rewrite if 'TPU' in self.device + f, (tf1.tpu.rewrite if 'TPU' in self.device else tf.xla.experimental.compile)) diff --git a/tensorflow_probability/python/experimental/auto_batching/type_inference_test.py b/tensorflow_probability/python/experimental/auto_batching/type_inference_test.py index ded9e1db5c..984d67ce56 100644 --- a/tensorflow_probability/python/experimental/auto_batching/type_inference_test.py +++ b/tensorflow_probability/python/experimental/auto_batching/type_inference_test.py @@ -26,7 +26,8 @@ import numpy as np import six -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import allocation_strategy from tensorflow_probability.python.experimental.auto_batching import instructions @@ -36,10 +37,11 @@ from tensorflow_probability.python.experimental.auto_batching import tf_backend from tensorflow_probability.python.experimental.auto_batching import type_inference from tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import NP_BACKEND = numpy_backend.NumpyBackend() + TF_BACKEND = tf_backend.TensorFlowBackend() @@ -78,7 +80,7 @@ def _execute(prog, inputs, stack_depth, backend): @test_util.run_all_in_graph_and_eager_modes -class TypeInferenceTest(tf.test.TestCase, parameterized.TestCase): +class TypeInferenceTest(test_case.TestCase, parameterized.TestCase): def assertSameTypes(self, expected_prog, typed, check_dtypes=True): for v, type_ in 
six.iteritems(typed.var_defs): @@ -132,7 +134,7 @@ def testFibonacciTypeInferenceNumpy(self, dtype): for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]): inputs = np.array(inputs, dtype=dtype) outputs = np.array(outputs, dtype=dtype) - tf.compat.v1.logging.debug('np.fib {} {} {}'.format( + tf1.logging.debug('np.fib {} {} {}'.format( dtype, inputs.shape, outputs.shape)) prog = test_programs.fibonacci_function_calls(include_types=False) typed = type_inference.infer_types(prog, [inputs], NP_BACKEND) @@ -152,7 +154,7 @@ def testFibonacciTypeInferenceTF(self, dtype): for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]): inputs = np.array(inputs, dtype=dtype) outputs = np.array(outputs, dtype=dtype) - tf.compat.v1.logging.debug('tf.fib {} {} {}'.format( + tf1.logging.debug('tf.fib {} {} {}'.format( dtype, inputs.shape, outputs.shape)) inputs_t = tf.constant(inputs, dtype=dtype) prog = test_programs.fibonacci_function_calls(include_types=False) @@ -170,7 +172,7 @@ def testIsEvenTypeInferenceNumpy(self, dtype): ([5, 6, 0, 3], [False, True, True, False])]: inputs = np.array(inputs, dtype=dtype) outputs = np.array(outputs, dtype=np.bool) - tf.compat.v1.logging.debug('np.even {} {} {}'.format( + tf1.logging.debug('np.even {} {} {}'.format( dtype, inputs.shape, outputs.shape)) prog = test_programs.is_even_function_calls(include_types=False) typed = type_inference.infer_types(prog, [inputs], NP_BACKEND) @@ -192,7 +194,7 @@ def testIsEvenTypeInferenceTF(self, dtype): ([5, 6, 0, 3], [False, True, True, False])]: inputs = np.array(inputs, dtype=dtype) outputs = np.array(outputs, dtype=np.bool) - tf.compat.v1.logging.debug('tf.even {} {} {}'.format( + tf1.logging.debug('tf.even {} {} {}'.format( dtype, inputs.shape, outputs.shape)) inputs_t = tf.constant(inputs, dtype=dtype) prog = test_programs.is_even_function_calls(include_types=False) diff --git a/tensorflow_probability/python/experimental/auto_batching/xla.py b/tensorflow_probability/python/experimental/auto_batching/xla.py index 575aea285a..02d9843e8e 100644 --- a/tensorflow_probability/python/experimental/auto_batching/xla.py +++ b/tensorflow_probability/python/experimental/auto_batching/xla.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf __all__ = ['compile_nested_output'] @@ -39,7 +39,7 @@ def compile_nested_output(f, compile_fn=None): g: Callable wrapping `f` which returns XLA-compiled, nested outputs. 
""" def _wrapper(*inputs): # pylint:disable=missing-docstring - nest = tf.compat.v2.nest + nest = tf.nest struct = [None] def _flattened(*inputs): result = f(*inputs) diff --git a/tensorflow_probability/python/experimental/edward2/BUILD b/tensorflow_probability/python/experimental/edward2/BUILD index 322ca80b6d..d024b6e4cb 100644 --- a/tensorflow_probability/python/experimental/edward2/BUILD +++ b/tensorflow_probability/python/experimental/edward2/BUILD @@ -60,6 +60,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -77,6 +78,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -97,6 +99,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -118,5 +121,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/experimental/edward2/README.md b/tensorflow_probability/python/experimental/edward2/README.md index 970339cc91..124e2accae 100644 --- a/tensorflow_probability/python/experimental/edward2/README.md +++ b/tensorflow_probability/python/experimental/edward2/README.md @@ -6,7 +6,7 @@ probabilistic programs and manipulate a model's computation for flexible training, latent variable inference, and predictions. Are you upgrading from Edward? Check out the guide -[`Upgrading_from_Edward_to_Edward2.md`](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/edward2/Upgrading_From_Edward_To_Edward2.md). +[`Upgrading_from_Edward_to_Edward2.md`](https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/experimental/edward2/Upgrading_From_Edward_To_Edward2.md). ## 1. 
Models as Probabilistic Programs diff --git a/tensorflow_probability/python/experimental/edward2/generated_random_variables_test.py b/tensorflow_probability/python/experimental/edward2/generated_random_variables_test.py index c719d52d7e..b2c1d34200 100644 --- a/tensorflow_probability/python/experimental/edward2/generated_random_variables_test.py +++ b/tensorflow_probability/python/experimental/edward2/generated_random_variables_test.py @@ -22,16 +22,17 @@ from absl.testing import parameterized import numpy as np import six -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability import distributions as tfd from tensorflow_probability import edward2 as ed - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class GeneratedRandomVariablesTest(parameterized.TestCase, tf.test.TestCase): +class GeneratedRandomVariablesTest(parameterized.TestCase, test_case.TestCase): def testBernoulliDoc(self): self.assertGreater(len(ed.Bernoulli.__doc__), 0) @@ -157,7 +158,7 @@ def testValueMismatchRaises(self): def testValueUnknownShape(self): if tf.executing_eagerly(): return # should not raise error - ed.Bernoulli(probs=0.5, value=tf.compat.v1.placeholder(tf.int32)) + ed.Bernoulli(probs=0.5, value=tf1.placeholder(tf.int32)) def testAsRandomVariable(self): # A wrapped Normal distribution should behave identically to @@ -177,7 +178,7 @@ def model_wrapped(): # Check that our attempt to back out the variable name from the # Distribution name is robust to name scoping. - with tf.compat.v1.name_scope("nested_scope"): + with tf1.name_scope("nested_scope"): dist = tfd.Normal(1., 0.1, name="x") def model_scoped(): return ed.as_random_variable(dist) diff --git a/tensorflow_probability/python/experimental/edward2/interceptor_test.py b/tensorflow_probability/python/experimental/edward2/interceptor_test.py index 9ef37b8bb1..2930ef3a81 100644 --- a/tensorflow_probability/python/experimental/edward2/interceptor_test.py +++ b/tensorflow_probability/python/experimental/edward2/interceptor_test.py @@ -20,15 +20,15 @@ from absl.testing import parameterized import six -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability import edward2 as ed - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class InterceptorTest(parameterized.TestCase, tf.test.TestCase): +class InterceptorTest(parameterized.TestCase, test_case.TestCase): @parameterized.parameters( {"cls": ed.Normal, "value": 2., "kwargs": {"loc": 0.5, "scale": 1.}}, diff --git a/tensorflow_probability/python/experimental/edward2/program_transformations.py b/tensorflow_probability/python/experimental/edward2/program_transformations.py index 585e780c40..4914e9df18 100644 --- a/tensorflow_probability/python/experimental/edward2/program_transformations.py +++ b/tensorflow_probability/python/experimental/edward2/program_transformations.py @@ -20,7 +20,7 @@ import inspect import six -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.edward2.interceptor import interceptable from tensorflow_probability.python.experimental.edward2.interceptor import interception diff --git 
a/tensorflow_probability/python/experimental/edward2/program_transformations_test.py b/tensorflow_probability/python/experimental/edward2/program_transformations_test.py index 8735cb3aba..e88de816a9 100644 --- a/tensorflow_probability/python/experimental/edward2/program_transformations_test.py +++ b/tensorflow_probability/python/experimental/edward2/program_transformations_test.py @@ -18,17 +18,17 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf -import tensorflow_probability as tfp - +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability import edward2 as ed +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class ProgramTransformationsTest(tf.test.TestCase): +class ProgramTransformationsTest(test_case.TestCase): def testMakeLogJointFnUnconditional(self): """Test `make_log_joint_fn` on unconditional Edward program.""" @@ -160,7 +160,7 @@ def true_log_joint(loc, flip, x): def testMakeLogJointFnTemplate(self): """Test `make_log_joint_fn` on program returned by tf.make_template.""" def variational(): - loc = tf.compat.v1.get_variable("loc", []) + loc = tf1.get_variable("loc", []) qz = ed.Normal(loc=loc, scale=0.5, name="qz") return qz @@ -170,16 +170,16 @@ def true_log_joint(loc, qz): return log_prob qz_value = 1.23 - variational_template = tf.compat.v1.make_template("variational", + variational_template = tf1.make_template("variational", variational) log_joint = ed.make_log_joint_fn(variational_template) expected_log_prob = log_joint(qz=qz_value) - loc = tf.compat.v1.trainable_variables("variational")[0] + loc = tf1.trainable_variables("variational")[0] actual_log_prob = true_log_joint(loc, qz_value) with self.cached_session() as sess: - sess.run(tf.compat.v1.initialize_all_variables()) + sess.run(tf1.initialize_all_variables()) actual_log_prob_, expected_log_prob_ = sess.run( [actual_log_prob, expected_log_prob]) self.assertEqual(actual_log_prob_, expected_log_prob_) diff --git a/tensorflow_probability/python/experimental/edward2/random_variable.py b/tensorflow_probability/python/experimental/edward2/random_variable.py index 4b24e2ad20..96720954ac 100644 --- a/tensorflow_probability/python/experimental/edward2/random_variable.py +++ b/tensorflow_probability/python/experimental/edward2/random_variable.py @@ -19,7 +19,8 @@ from __future__ import print_function import functools -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow.python.client import session as tf_session from tensorflow.python.framework import ops @@ -145,7 +146,7 @@ def sample_shape_tensor(self, name="sample_shape_tensor"): Returns: sample_shape: `Tensor`. 
""" - with tf.compat.v1.name_scope(name): + with tf1.name_scope(name): if isinstance(self._sample_shape, tf.Tensor): return self._sample_shape return tf.convert_to_tensor( diff --git a/tensorflow_probability/python/experimental/edward2/random_variable_test.py b/tensorflow_probability/python/experimental/edward2/random_variable_test.py index 497d9e1e59..74a81843c6 100644 --- a/tensorflow_probability/python/experimental/edward2/random_variable_test.py +++ b/tensorflow_probability/python/experimental/edward2/random_variable_test.py @@ -19,17 +19,18 @@ from __future__ import print_function import re + from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - from tensorflow_probability import edward2 as ed +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions - class FakeDistribution(tfd.Distribution): """Fake distribution class for testing.""" @@ -43,7 +44,7 @@ def __init__(self): @test_util.run_all_in_graph_and_eager_modes -class RandomVariableTest(parameterized.TestCase, tf.test.TestCase): +class RandomVariableTest(parameterized.TestCase, test_case.TestCase): def testConstructor(self): x = ed.RandomVariable(tfd.Poisson(rate=tf.ones([2, 5])), @@ -298,7 +299,7 @@ def testSessionEval(self): if tf.executing_eagerly(): return with self.cached_session() as sess: x = ed.RandomVariable(tfd.Normal(0.0, 0.1)) - x_ph = tf.compat.v1.placeholder(tf.float32, []) + x_ph = tf1.placeholder(tf.float32, []) y = ed.RandomVariable(tfd.Normal(x_ph, 0.1)) self.assertLess(x.eval(), 5.0) self.assertLess(x.eval(sess), 5.0) @@ -312,7 +313,7 @@ def testSessionRun(self): if tf.executing_eagerly(): return with self.cached_session() as sess: x = ed.RandomVariable(tfd.Normal(0.0, 0.1)) - x_ph = tf.compat.v1.placeholder(tf.float32, []) + x_ph = tf1.placeholder(tf.float32, []) y = ed.RandomVariable(tfd.Normal(x_ph, 0.1)) self.assertLess(sess.run(x), 5.0) self.assertLess(sess.run(x, feed_dict={x_ph: 100.0}), 5.0) diff --git a/tensorflow_probability/python/experimental/mcmc/BUILD b/tensorflow_probability/python/experimental/mcmc/BUILD index b829202b72..e55920409c 100644 --- a/tensorflow_probability/python/experimental/mcmc/BUILD +++ b/tensorflow_probability/python/experimental/mcmc/BUILD @@ -65,6 +65,7 @@ py_library( "//tensorflow_probability", "//tensorflow_probability/python/distributions/internal:statistical_testing", "//tensorflow_probability/python/experimental/auto_batching", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -78,6 +79,7 @@ py_test( tags = ["nozapfhahn"], deps = [ ":nuts_testlib", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -92,5 +94,8 @@ py_test( "nozapfhahn", "requires-gpu-sm35", ], - deps = [":nuts_testlib"], + deps = [ + ":nuts_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) diff --git a/tensorflow_probability/python/experimental/mcmc/nuts.py b/tensorflow_probability/python/experimental/mcmc/nuts.py index d50a44e109..b8fb033750 100644 --- a/tensorflow_probability/python/experimental/mcmc/nuts.py +++ b/tensorflow_probability/python/experimental/mcmc/nuts.py @@ -44,7 +44,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import 
tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import math as tfp_math from tensorflow_probability.python import mcmc @@ -215,20 +216,20 @@ def one_step(self, current_state, previous_kernel_results): current_grads_log_prob = previous_kernel_results.grads_target_log_prob leapfrogs_taken = previous_kernel_results.leapfrogs_taken leapfrogs_computed = previous_kernel_results.leapfrogs_computed - with tf.compat.v1.name_scope( + with tf1.name_scope( self.name, values=[ current_state, self.step_size, current_target_log_prob, current_grads_log_prob ]): unwrap_state_list = False - with tf.compat.v1.name_scope("initialize"): - if not tf.compat.v2.nest.is_nested(current_state): + with tf1.name_scope("initialize"): + if not tf.nest.is_nested(current_state): unwrap_state_list = True current_state = [current_state] current_state = [tf.convert_to_tensor(value=s) for s in current_state] step_size = self.step_size - if not tf.compat.v2.nest.is_nested(step_size): + if not tf.nest.is_nested(step_size): step_size = [step_size] step_size = [tf.convert_to_tensor(value=s) for s in step_size] if len(step_size) == 1: @@ -266,9 +267,9 @@ def one_step(self, current_state, previous_kernel_results): def bootstrap_results(self, init_state): """Creates initial `previous_kernel_results` using a supplied `state`.""" - if not tf.compat.v2.nest.is_nested(init_state): + if not tf.nest.is_nested(init_state): init_state = [init_state] - with tf.compat.v1.name_scope("NoUTurnSampler.bootstrap_results"): + with tf1.name_scope("NoUTurnSampler.bootstrap_results"): batch_size = tf.shape(input=init_state[0])[0] (current_target_log_prob, current_grads_log_prob) = self.value_and_gradients_fn(*init_state) @@ -586,7 +587,7 @@ def func_wrapped(*args, **kwargs): def _start_trajectory_batched( current_state, current_target_log_prob, seed_stream): """Computations needed to start a trajectory.""" - with tf.compat.v1.name_scope("start_trajectory_batched"): + with tf1.name_scope("start_trajectory_batched"): batch_size = tf.shape(input=current_state[0])[0] current_momentum = [] for state_tensor in current_state: @@ -611,13 +612,13 @@ def _start_trajectory_batched( def _batchwise_reduce_sum(x): - with tf.compat.v1.name_scope("batchwise_reduce_sum"): + with tf1.name_scope("batchwise_reduce_sum"): return tf.reduce_sum(input_tensor=x, axis=tf.range(1, tf.rank(x))) def _has_no_u_turn(state_one, state_two, momentum): """If two given states and momentum do not exhibit a U-turn pattern.""" - with tf.compat.v1.name_scope("has_no_u_turn"): + with tf1.name_scope("has_no_u_turn"): batch_dot_product = sum( [_batchwise_reduce_sum((s1 - s2) * m) for s1, s2, m in zip(state_one, state_two, momentum)]) @@ -630,7 +631,7 @@ def _leapfrog_base(value_and_gradients_fn, direction, unrolled_leapfrog_steps): """Runs `unrolled_leapfrog_steps` steps of leapfrog integration.""" - with tf.compat.v1.name_scope("leapfrog"): + with tf1.name_scope("leapfrog"): step_size = [d * s for d, s in zip(direction, step_size)] for _ in range(unrolled_leapfrog_steps): mid_momentum = [ @@ -639,7 +640,7 @@ def _leapfrog_base(value_and_gradients_fn, next_state = [ s + step * m for s, step, m in zip(current.state, step_size, mid_momentum)] - with tf.compat.v1.name_scope("gradients"): + with tf1.name_scope("gradients"): [next_target_log_prob, next_grads_target_log_prob] = value_and_gradients_fn(*next_state) next_momentum = [ @@ -697,7 +698,7 @@ def _expand_dims_under_batch_dim(tensor, new_rank): def _log_joint(current): """Log-joint probability 
given a state's log-probability and momentum.""" - with tf.compat.v1.name_scope("log_joint"): + with tf1.name_scope("log_joint"): momentum_log_prob = -sum([ _batchwise_reduce_sum(0.5 * (m ** 2)) for m in current.momentum]) return current.target_log_prob + momentum_log_prob @@ -707,13 +708,13 @@ def _compute_num_states_batched(next_log_joint, log_slice_sample): # Returns the number of states (of necessity, at most one per batch member) # represented by the `next_log_joint` Tensor that are good enough to pass the # slice variable. - with tf.compat.v1.name_scope("compute_num_states_batched"): + with tf1.name_scope("compute_num_states_batched"): return tf.cast(next_log_joint > log_slice_sample, dtype=tf.int64) def _random_bernoulli(shape, probs, dtype=tf.int64, seed=None, name=None): """Returns samples from a Bernoulli distribution.""" - with tf.compat.v1.name_scope(name, "random_bernoulli", [shape, probs]): + with tf1.name_scope(name, "random_bernoulli", [shape, probs]): probs = tf.convert_to_tensor(value=probs) random_uniform = tf.random.uniform(shape, dtype=probs.dtype, seed=seed) return tf.cast(tf.less(random_uniform, probs), dtype) @@ -721,7 +722,7 @@ def _random_bernoulli(shape, probs, dtype=tf.int64, seed=None, name=None): def _continue_test_batched( continue_trajectory, forward, reverse): - with tf.compat.v1.name_scope("continue_test_batched"): + with tf1.name_scope("continue_test_batched"): return (continue_trajectory & _has_no_u_turn(forward.state, reverse.state, forward.momentum) & _has_no_u_turn(forward.state, reverse.state, reverse.momentum)) @@ -729,7 +730,7 @@ def _continue_test_batched( def _binomial_subtree_acceptance_batched( num_states_in_subtree, num_states, seed_stream): - with tf.compat.v1.name_scope("binomial_subtree_acceptance_batched"): + with tf1.name_scope("binomial_subtree_acceptance_batched"): batch_size = tf.shape(input=num_states_in_subtree)[0] return _random_bernoulli( [batch_size], @@ -741,7 +742,7 @@ def _binomial_subtree_acceptance_batched( def _choose_direction_batched(point, seed_stream): - with tf.compat.v1.name_scope("choose_direction_batched"): + with tf1.name_scope("choose_direction_batched"): batch_size = tf.shape(input=point.state[0])[0] dtype = point.state[0].dtype return tfp_math.random_rademacher( @@ -750,4 +751,4 @@ def _choose_direction_batched(point, seed_stream): def _tf_where(condition, x, y): return ab.instructions.pattern_map2( - lambda x_elt, y_elt: tf.compat.v1.where(condition, x_elt, y_elt), x, y) + lambda x_elt, y_elt: tf1.where(condition, x_elt, y_elt), x, y) diff --git a/tensorflow_probability/python/experimental/mcmc/nuts_test.py b/tensorflow_probability/python/experimental/mcmc/nuts_test.py index 0e2458e615..ec5de3c02f 100644 --- a/tensorflow_probability/python/experimental/mcmc/nuts_test.py +++ b/tensorflow_probability/python/experimental/mcmc/nuts_test.py @@ -21,26 +21,27 @@ import itertools # Dependency imports + from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.distributions.internal import statistical_testing as st from tensorflow_probability.python.experimental.auto_batching import instructions as inst +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as 
tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfb = tfp.bijectors -tfd = tfp.distributions - def run_nuts_chain( event_size, batch_size, num_steps, initial_state=None, dry_run=False, stackless=False): def target_log_prob_fn(event): - with tf.compat.v1.name_scope('nuts_test_target_log_prob', values=[event]): + with tf1.name_scope('nuts_test_target_log_prob', values=[event]): return tfd.MultivariateNormalDiag( tf.zeros(event_size), scale_identity_multiplier=1.).log_prob(event) @@ -111,21 +112,21 @@ def target(*args): answer = result[0] check_cdf_agrees = st.assert_true_cdf_equal_by_dkwm( answer, target_d.cdf, false_fail_rate=1e-6) - check_enough_power = tf.compat.v1.assert_less( + check_enough_power = tf1.assert_less( st.min_discrepancy_of_true_cdfs_detectable_by_dkwm( num_samples, false_fail_rate=1e-6, false_pass_rate=1e-6), 0.025) test.assertAllEqual([num_samples], extra.leapfrogs_taken[0].shape) unique, _ = tf.unique(extra.leapfrogs_taken[0]) - check_leapfrogs_vary = tf.compat.v1.assert_greater_equal( + check_leapfrogs_vary = tf1.assert_greater_equal( tf.shape(input=unique)[0], 3) avg_leapfrogs = tf.math.reduce_mean(input_tensor=extra.leapfrogs_taken[0]) - check_leapfrogs = tf.compat.v1.assert_greater_equal( + check_leapfrogs = tf1.assert_greater_equal( avg_leapfrogs, tf.constant(4, dtype=avg_leapfrogs.dtype)) movement = tf.abs(answer - initialization) test.assertAllEqual([num_samples], movement.shape) # This movement distance (1 * step_size) was selected by reducing until 100 # runs with independent seeds all passed. - check_movement = tf.compat.v1.assert_greater_equal( + check_movement = tf1.assert_greater_equal( tf.reduce_mean(input_tensor=movement), 1 * step_size) return (check_cdf_agrees, check_enough_power, check_leapfrogs_vary, check_leapfrogs, check_movement) @@ -143,19 +144,19 @@ def assert_mvn_target_conservation(event_size, batch_size, **kwargs): check_cdf_agrees = ( st.assert_multivariate_true_cdf_equal_on_projections_two_sample( answer, initialization, num_projections=100, false_fail_rate=1e-6)) - check_sample_shape = tf.compat.v1.assert_equal( + check_sample_shape = tf1.assert_equal( tf.shape(input=answer)[0], batch_size) unique, _ = tf.unique(leapfrogs[0]) - check_leapfrogs_vary = tf.compat.v1.assert_greater_equal( + check_leapfrogs_vary = tf1.assert_greater_equal( tf.shape(input=unique)[0], 3) avg_leapfrogs = tf.math.reduce_mean(input_tensor=leapfrogs[0]) - check_leapfrogs = tf.compat.v1.assert_greater_equal( + check_leapfrogs = tf1.assert_greater_equal( avg_leapfrogs, tf.constant(4, dtype=avg_leapfrogs.dtype)) movement = tf.linalg.norm(tensor=answer - initialization, axis=-1) # This movement distance (0.3) was copied from the univariate case. 
- check_movement = tf.compat.v1.assert_greater_equal( + check_movement = tf1.assert_greater_equal( tf.reduce_mean(input_tensor=movement), 0.3) - check_enough_power = tf.compat.v1.assert_less( + check_enough_power = tf1.assert_less( st.min_discrepancy_of_true_cdfs_detectable_by_dkwm_two_sample( batch_size, batch_size, false_fail_rate=1e-8, false_pass_rate=1e-6), 0.055) @@ -164,7 +165,7 @@ def assert_mvn_target_conservation(event_size, batch_size, **kwargs): @test_util.run_all_in_graph_and_eager_modes -class NutsTest(parameterized.TestCase, tf.test.TestCase): +class NutsTest(parameterized.TestCase, test_case.TestCase): @parameterized.parameters(itertools.product([2, 3], [1, 2, 3])) def testLeapfrogStepCounter(self, tree_depth, unrolled_leapfrog_steps): diff --git a/tensorflow_probability/python/experimental/substrates/jax/distributions/internal/BUILD b/tensorflow_probability/python/experimental/substrates/jax/distributions/internal/BUILD index 3fe05cddf4..bcd9ae100b 100644 --- a/tensorflow_probability/python/experimental/substrates/jax/distributions/internal/BUILD +++ b/tensorflow_probability/python/experimental/substrates/jax/distributions/internal/BUILD @@ -66,6 +66,7 @@ TEST_FILENAMES = [filename + "_test" for filename in FILENAMES] # numpy dep, # scipy dep, # six dep, + "//tensorflow_probability/python/experimental/substrates/jax/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/jax/math/BUILD b/tensorflow_probability/python/experimental/substrates/jax/math/BUILD index 60ae03c9c3..c0e7a6e492 100644 --- a/tensorflow_probability/python/experimental/substrates/jax/math/BUILD +++ b/tensorflow_probability/python/experimental/substrates/jax/math/BUILD @@ -60,6 +60,7 @@ TEST_FILENAMES = [] tags = ["tfp_jax"], deps = [ ":math", + "//tensorflow_probability/python/experimental/substrates/jax/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/jax/stats/BUILD b/tensorflow_probability/python/experimental/substrates/jax/stats/BUILD index 766e8cce7f..4975832888 100644 --- a/tensorflow_probability/python/experimental/substrates/jax/stats/BUILD +++ b/tensorflow_probability/python/experimental/substrates/jax/stats/BUILD @@ -57,6 +57,7 @@ TEST_FILENAMES = [] tags = ["tfp_jax"], deps = [ ":stats", + "//tensorflow_probability/python/experimental/substrates/jax/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/jax/util/BUILD b/tensorflow_probability/python/experimental/substrates/jax/util/BUILD index 9a2eb93cd1..fb917404b8 100644 --- a/tensorflow_probability/python/experimental/substrates/jax/util/BUILD +++ b/tensorflow_probability/python/experimental/substrates/jax/util/BUILD @@ -57,6 +57,7 @@ TEST_FILENAMES = [] tags = ["tfp_jax"], deps = [ ":util", + "//tensorflow_probability/python/experimental/substrates/jax/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/numpy/BUILD b/tensorflow_probability/python/experimental/substrates/numpy/BUILD index 807cd9a768..b430d5bb63 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/BUILD +++ b/tensorflow_probability/python/experimental/substrates/numpy/BUILD @@ -53,5 +53,6 @@ py_test( tags = ["tfp_numpy"], deps = [ "//tensorflow_probability", + "//tensorflow_probability/python/experimental/substrates/numpy/internal:test_case", ], ) diff --git 
a/tensorflow_probability/python/experimental/substrates/numpy/distributions/internal/BUILD b/tensorflow_probability/python/experimental/substrates/numpy/distributions/internal/BUILD index 320ac594f1..5b886497fa 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/distributions/internal/BUILD +++ b/tensorflow_probability/python/experimental/substrates/numpy/distributions/internal/BUILD @@ -66,6 +66,7 @@ TEST_FILENAMES = [filename + "_test" for filename in FILENAMES] # numpy dep, # scipy dep, # six dep, + "//tensorflow_probability/python/experimental/substrates/numpy/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/numpy/math/BUILD b/tensorflow_probability/python/experimental/substrates/numpy/math/BUILD index 6d3be72d95..a9fe632b41 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/math/BUILD +++ b/tensorflow_probability/python/experimental/substrates/numpy/math/BUILD @@ -60,6 +60,7 @@ TEST_FILENAMES = [] tags = ["tfp_numpy"], deps = [ ":math", + "//tensorflow_probability/python/experimental/substrates/numpy/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/numpy/rewrite.py b/tensorflow_probability/python/experimental/substrates/numpy/rewrite.py index a138b844c3..f7a1c742fc 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/rewrite.py +++ b/tensorflow_probability/python/experimental/substrates/numpy/rewrite.py @@ -18,11 +18,17 @@ from __future__ import division from __future__ import print_function +import sys +if not sys.path[0].endswith('.runfiles'): + sys.path.pop(0) + +# pylint: disable=g-import-not-at-top,g-bad-import-order import collections # Dependency imports from absl import app from absl import flags +# pylint: enable=g-import-not-at-top,g-bad-import-order flags.DEFINE_boolean('numpy_to_jax', False, 'Whether or not to rewrite numpy imports to jax.numpy') diff --git a/tensorflow_probability/python/experimental/substrates/numpy/stats/BUILD b/tensorflow_probability/python/experimental/substrates/numpy/stats/BUILD index 079587be10..e0d4107560 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/stats/BUILD +++ b/tensorflow_probability/python/experimental/substrates/numpy/stats/BUILD @@ -57,6 +57,7 @@ TEST_FILENAMES = [] tags = ["tfp_numpy"], deps = [ ":stats", + "//tensorflow_probability/python/experimental/substrates/numpy/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/experimental/substrates/numpy/util/BUILD b/tensorflow_probability/python/experimental/substrates/numpy/util/BUILD index c83bc1b570..03c474eac5 100644 --- a/tensorflow_probability/python/experimental/substrates/numpy/util/BUILD +++ b/tensorflow_probability/python/experimental/substrates/numpy/util/BUILD @@ -56,6 +56,7 @@ TEST_FILENAMES = ["{}_test".format(filename) for filename in FILENAMES] deps = [ ":util", "//tensorflow_probability", + "//tensorflow_probability/python/experimental/substrates/numpy/internal:test_case", ], ) for filename in TEST_FILENAMES] diff --git a/tensorflow_probability/python/glm/BUILD b/tensorflow_probability/python/glm/BUILD index 5b456a68be..f1049c941d 100644 --- a/tensorflow_probability/python/glm/BUILD +++ b/tensorflow_probability/python/glm/BUILD @@ -57,6 +57,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -79,6 +80,7 
@@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -101,5 +103,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/glm/family.py b/tensorflow_probability/python/glm/family.py index a64d11c993..7cc02e856e 100644 --- a/tensorflow_probability/python/glm/family.py +++ b/tensorflow_probability/python/glm/family.py @@ -21,7 +21,8 @@ import contextlib import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import dtype_util @@ -74,7 +75,7 @@ def __init__(self, name=None): functions. Default value: `None` (i.e., the subclass name). """ if not name or name[-1] != '/': # `name` is not a name scope. - with tf.compat.v1.name_scope(name or type(self).__name__) as name: + with tf1.name_scope(name or type(self).__name__) as name: pass self._name = name @@ -173,8 +174,8 @@ def __repr__(self): @contextlib.contextmanager def _name_scope(self, name=None, default_name=None, values=None): """Helper function to standardize op scope.""" - with tf.compat.v1.name_scope(self.name): - with tf.compat.v1.name_scope( + with tf1.name_scope(self.name): + with tf1.name_scope( name, default_name, values=values or []) as scope: yield scope diff --git a/tensorflow_probability/python/glm/family_test.py b/tensorflow_probability/python/glm/family_test.py index f08caaa8f2..3ef919bac1 100644 --- a/tensorflow_probability/python/glm/family_test.py +++ b/tensorflow_probability/python/glm/family_test.py @@ -19,13 +19,13 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case -tfb = tfp.bijectors -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -104,7 +104,7 @@ def testLogProbWorksCorrectly(self): @test_util.run_all_in_graph_and_eager_modes -class BernoulliTest(tf.test.TestCase, _GLMTestHarness): +class BernoulliTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -114,7 +114,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class BernoulliNormalCDFTest(tf.test.TestCase, _GLMTestHarness): +class BernoulliNormalCDFTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -129,7 +129,7 @@ def normal_cdf(r): @test_util.run_all_in_graph_and_eager_modes -class GammaExpTest(tf.test.TestCase, _GLMTestHarness): +class GammaExpTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -141,7 +141,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class GammaSoftplusTest(tf.test.TestCase, _GLMTestHarness): +class GammaSoftplusTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -153,7 +153,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class LogNormalTest(tf.test.TestCase, _GLMTestHarness): +class LogNormalTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ 
-163,7 +163,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class LogNormalSoftplusTest(tf.test.TestCase, _GLMTestHarness): +class LogNormalSoftplusTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -173,7 +173,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class NormalTest(tf.test.TestCase, _GLMTestHarness): +class NormalTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -183,7 +183,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class NormalReciprocalTest(tf.test.TestCase, _GLMTestHarness): +class NormalReciprocalTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -193,7 +193,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class PoissonTest(tf.test.TestCase, _GLMTestHarness): +class PoissonTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 @@ -203,7 +203,7 @@ def setUp(self): @test_util.run_all_in_graph_and_eager_modes -class PoissonSoftplusTest(tf.test.TestCase, _GLMTestHarness): +class PoissonSoftplusTest(test_case.TestCase, _GLMTestHarness): def setUp(self): self.dtype = np.float32 diff --git a/tensorflow_probability/python/glm/fisher_scoring.py b/tensorflow_probability/python/glm/fisher_scoring.py index 5c5d0e97a0..fcf9434f4f 100644 --- a/tensorflow_probability/python/glm/fisher_scoring.py +++ b/tensorflow_probability/python/glm/fisher_scoring.py @@ -20,7 +20,8 @@ import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import prefer_static @@ -189,7 +190,7 @@ def make_dataset(n, d, link, scale=1., dtype=np.float32): graph_deps = [model_matrix, response, model_coefficients_start, predicted_linear_response_start, dispersion, offset, learning_rate, maximum_iterations] - with tf.compat.v1.name_scope(name, 'fit', graph_deps): + with tf1.name_scope(name, 'fit', graph_deps): [ model_matrix, response, @@ -341,7 +342,7 @@ def fit_one_step( """ graph_deps = [model_matrix, response, model_coefficients_start, predicted_linear_response_start, dispersion, learning_rate] - with tf.compat.v1.name_scope(name, 'fit_one_step', graph_deps): + with tf1.name_scope(name, 'fit_one_step', graph_deps): [ model_matrix, @@ -370,7 +371,7 @@ def fit_one_step( def mask_if_invalid(x, mask): mask = tf.fill( tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype)) - return tf.compat.v1.where(is_valid, x, mask) + return tf1.where(is_valid, x, mask) # Run one step of iteratively reweighted least-squares. # Compute "`z`", the adjusted predicted linear response. 
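# Background, from the standard IRLS derivation for GLMs (not quoted
# from this file): with eta the current linear predictor, h the inverse
# link, and mu = h(eta) the fitted mean, the adjusted response is
# z = eta + (y - mu) / h'(eta), and the updated coefficients solve a
# weighted least squares against z with weights w = h'(eta)^2 / Var(mu).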
@@ -595,7 +596,7 @@ def prepare_args(model_matrix, """ graph_deps = [model_matrix, response, model_coefficients, predicted_linear_response, offset] - with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps): + with tf1.name_scope(name, 'prepare_args', graph_deps): dtype = dtype_util.common_dtype(graph_deps, np.float32) model_matrix = tf.convert_to_tensor( @@ -656,7 +657,7 @@ def prepare_args(model_matrix, def calculate_linear_predictor(model_matrix, model_coefficients, offset=None, name=None): """Computes `model_matrix @ model_coefficients + offset`.""" - with tf.compat.v1.name_scope(name, 'calculate_linear_predictor', + with tf1.name_scope(name, 'calculate_linear_predictor', [model_matrix, model_coefficients, offset]): predicted_linear_response = tf.linalg.matvec(model_matrix, model_coefficients) diff --git a/tensorflow_probability/python/glm/fisher_scoring_test.py b/tensorflow_probability/python/glm/fisher_scoring_test.py index 1902b36ec7..007890c0bb 100644 --- a/tensorflow_probability/python/glm/fisher_scoring_test.py +++ b/tensorflow_probability/python/glm/fisher_scoring_test.py @@ -19,16 +19,19 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -tfd = tfp.distributions +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class FitTestFast(tf.test.TestCase): +class FitTestFast(test_case.TestCase): dtype = np.float32 fast = True @@ -272,7 +275,7 @@ def _testL2RegularizationWorksCorrectly(self, static_l2): ] = self.make_dataset(n=n, d=3, link='probit') l2_regularizer = np.array(0.07 * n, model_matrix.dtype.as_numpy_dtype) if not static_l2: - l2_regularizer = tf.compat.v1.placeholder_with_default( + l2_regularizer = tf1.placeholder_with_default( l2_regularizer, shape=[]) [ expected_model_coefficients, diff --git a/tensorflow_probability/python/glm/proximal_hessian.py b/tensorflow_probability/python/glm/proximal_hessian.py index 0277805291..ddfba9e508 100644 --- a/tensorflow_probability/python/glm/proximal_hessian.py +++ b/tensorflow_probability/python/glm/proximal_hessian.py @@ -26,7 +26,8 @@ import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.math.linalg import sparse_or_dense_matvecmul @@ -95,7 +96,7 @@ def _grad_neg_log_likelihood_and_fim(model_matrix, linear_response, response, def _mask_if_invalid(x, mask): mask = tf.fill( tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype)) - return tf.compat.v1.where(is_valid, x, mask) + return tf1.where(is_valid, x, mask) # TODO(b/111923449): Link to derivation once it's available. 
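# Sketch of that derivation, under the usual GLM assumptions: for
# mu = h(eta), the negative log-likelihood has gradient -X^T v with
# v = (y - mu) * h'(eta) / Var(mu), and Fisher information
# X^T diag(h'(eta)^2 / Var(mu)) X. `_mask_if_invalid` substitutes a
# harmless constant (the literal 1 passed below) so these quotients
# stay finite wherever the inputs are flagged invalid.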
v = (response - mean) * _mask_if_invalid(grad_mean, 1) / _mask_if_invalid( @@ -213,7 +214,7 @@ def fit_sparse_one_step(model_matrix, tolerance, learning_rate, ] - with tf.compat.v1.name_scope(name, 'fit_sparse_one_step', graph_deps): + with tf1.name_scope(name, 'fit_sparse_one_step', graph_deps): predicted_linear_response = sparse_or_dense_matvecmul( model_matrix, model_coefficients_start) g, h_middle = _grad_neg_log_likelihood_and_fim( @@ -468,7 +469,7 @@ def make_dataset(n, d, link, scale=1., dtype=np.float32): tolerance, learning_rate, ] - with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): + with tf1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) @@ -513,7 +514,7 @@ def _fit_sparse_exact_hessian( # pylint: disable = missing-docstring tolerance, learning_rate, ] - with tf.compat.v1.name_scope(name, 'fit_sparse_exact_hessian', graph_deps): + with tf1.name_scope(name, 'fit_sparse_exact_hessian', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _neg_log_likelihood(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) diff --git a/tensorflow_probability/python/glm/proximal_hessian_test.py b/tensorflow_probability/python/glm/proximal_hessian_test.py index 7de2a43a18..fe913b0f11 100644 --- a/tensorflow_probability/python/glm/proximal_hessian_test.py +++ b/tensorflow_probability/python/glm/proximal_hessian_test.py @@ -19,15 +19,16 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - @test_util.run_all_in_graph_and_eager_modes class _ProximalHessianTest(object): @@ -66,7 +67,7 @@ def _make_dataset(self, tf.linalg.norm(tensor=model_coefficients, axis=-1)[..., tf.newaxis]) mask = tfd.Bernoulli(probs=0.5, dtype=tf.bool).sample(batch_shape + [d]) - model_coefficients = tf.compat.v1.where(mask, model_coefficients, + model_coefficients = tf1.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( @@ -89,7 +90,7 @@ def _make_dataset(self, return self.evaluate([model_matrix, response, model_coefficients, mask]) def _make_placeholder(self, x): - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=x, shape=(x.shape if self.use_static_shape else None)) def _adjust_dtype_and_shape_hints(self, x): @@ -385,25 +386,25 @@ def testCompareBatchResultsToSingleInstance_Sparse(self): self._test_compare_batch_to_single_instance(use_sparse_tensor=True) -class ProximalHessianTestStaticShapeFloat32(tf.test.TestCase, +class ProximalHessianTestStaticShapeFloat32(test_case.TestCase, _ProximalHessianTest): dtype = tf.float32 use_static_shape = True -class ProximalHessianTestDynamicShapeFloat32(tf.test.TestCase, +class ProximalHessianTestDynamicShapeFloat32(test_case.TestCase, _ProximalHessianTest): dtype = tf.float32 use_static_shape = False -class ProximalHessianTestStaticShapeFloat64(tf.test.TestCase, +class 
ProximalHessianTestStaticShapeFloat64(test_case.TestCase, _ProximalHessianTest): dtype = tf.float64 use_static_shape = True -class ProximalHessianTestDynamicShapeFloat64(tf.test.TestCase, +class ProximalHessianTestDynamicShapeFloat64(test_case.TestCase, _ProximalHessianTest): dtype = tf.float64 use_static_shape = False diff --git a/tensorflow_probability/python/internal/BUILD b/tensorflow_probability/python/internal/BUILD index e35bd7d482..8126664925 100644 --- a/tensorflow_probability/python/internal/BUILD +++ b/tensorflow_probability/python/internal/BUILD @@ -64,6 +64,7 @@ py_test( ":docstring_util", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -86,6 +87,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -115,6 +117,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability/python/distributions", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/math:gradient", ], ) @@ -155,6 +158,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -206,6 +210,7 @@ py_test( # numpy dep, # scipy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/math:gradient", ], ) @@ -290,6 +295,7 @@ py_test( # hypothesis dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -305,6 +311,7 @@ py_library( # numpy dep, # six dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal/backend/numpy", "//tensorflow_probability/python/util:seed_stream", ], @@ -319,6 +326,7 @@ py_test( # absl/flags dep, # absl/testing:flagsaver dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/util:seed_stream", ], ) diff --git a/tensorflow_probability/python/internal/assert_util.py b/tensorflow_probability/python/internal/assert_util.py index f518a3d4c2..7fbd7cbc90 100644 --- a/tensorflow_probability/python/internal/assert_util.py +++ b/tensorflow_probability/python/internal/assert_util.py @@ -21,25 +21,26 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf # Note: These assertions raise tf.errors.InvalidArgumentError when they fail. 
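# The aliases below give the rest of TFP a single import point
# (assert_util.assert_*) that hides which TF API surface supplies each
# check; today that is the v1 compat endpoint, now reached through the
# tf1 module alias instead of the longer tf.compat.v1 spelling.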
-assert_equal = tf.compat.v1.assert_equal -assert_greater = tf.compat.v1.assert_greater -assert_less = tf.compat.v1.assert_less -assert_rank = tf.compat.v1.assert_rank - -assert_greater_equal = tf.compat.v1.assert_greater_equal -assert_integer = tf.compat.v1.assert_integer -assert_less_equal = tf.compat.v1.assert_less_equal -assert_near = tf.compat.v1.assert_near -assert_negative = tf.compat.v1.assert_negative -assert_non_negative = tf.compat.v1.assert_non_negative -assert_non_positive = tf.compat.v1.assert_non_positive -assert_none_equal = tf.compat.v1.assert_none_equal -assert_positive = tf.compat.v1.assert_positive -assert_rank_at_least = tf.compat.v1.assert_rank_at_least -assert_rank_in = tf.compat.v1.assert_rank_in +assert_equal = tf1.assert_equal +assert_greater = tf1.assert_greater +assert_less = tf1.assert_less +assert_rank = tf1.assert_rank + +assert_greater_equal = tf1.assert_greater_equal +assert_integer = tf1.assert_integer +assert_less_equal = tf1.assert_less_equal +assert_near = tf1.assert_near +assert_negative = tf1.assert_negative +assert_non_negative = tf1.assert_non_negative +assert_non_positive = tf1.assert_non_positive +assert_none_equal = tf1.assert_none_equal +assert_positive = tf1.assert_positive +assert_rank_at_least = tf1.assert_rank_at_least +assert_rank_in = tf1.assert_rank_in def assert_finite(x, data=None, summarize=None, message=None, name=None): @@ -61,13 +62,13 @@ def assert_finite(x, data=None, summarize=None, message=None, name=None): Raises: ValueError: If static checks determine `x` has wrong rank. """ - with tf.compat.v2.name_scope(name or 'assert_finite'): + with tf.name_scope(name or 'assert_finite'): x_ = tf.get_static_value(x) if x_ is not None: if ~np.all(np.isfinite(x_)): raise ValueError(message) return x - assertion = tf.compat.v1.assert_equal( + assertion = tf1.assert_equal( tf.math.is_finite(x), tf.ones_like(x, tf.bool), data=data, summarize=summarize, message=message) with tf.control_dependencies([assertion]): @@ -102,6 +103,6 @@ def assert_rank_at_most(x, rank, data=None, summarize=None, message=None, Raises: ValueError: If static checks determine `x` has wrong rank. 
""" - with tf.compat.v2.name_scope(name or 'assert_rank_at_most'): - return tf.compat.v1.assert_less_equal( + with tf.name_scope(name or 'assert_rank_at_most'): + return tf1.assert_less_equal( tf.rank(x), rank, data=data, summarize=summarize, message=message) diff --git a/tensorflow_probability/python/internal/backend/numpy/_utils.py b/tensorflow_probability/python/internal/backend/numpy/_utils.py index f7736da703..cf59b16016 100644 --- a/tensorflow_probability/python/internal/backend/numpy/_utils.py +++ b/tensorflow_probability/python/internal/backend/numpy/_utils.py @@ -22,7 +22,7 @@ import types import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf __all__ = [ diff --git a/tensorflow_probability/python/internal/backend/numpy/compat.py b/tensorflow_probability/python/internal/backend/numpy/compat.py index a1e36288cc..e4309f99ac 100644 --- a/tensorflow_probability/python/internal/backend/numpy/compat.py +++ b/tensorflow_probability/python/internal/backend/numpy/compat.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy import v1 diff --git a/tensorflow_probability/python/internal/backend/numpy/control_flow.py b/tensorflow_probability/python/internal/backend/numpy/control_flow.py index 76aa934ef4..a7d34a23b6 100644 --- a/tensorflow_probability/python/internal/backend/numpy/control_flow.py +++ b/tensorflow_probability/python/internal/backend/numpy/control_flow.py @@ -20,7 +20,7 @@ # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/debugging.py b/tensorflow_probability/python/internal/backend/numpy/debugging.py index 27bfb54d8b..3f2b0b21f4 100644 --- a/tensorflow_probability/python/internal/backend/numpy/debugging.py +++ b/tensorflow_probability/python/internal/backend/numpy/debugging.py @@ -19,7 +19,7 @@ # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/dtype.py b/tensorflow_probability/python/internal/backend/numpy/dtype.py index 6d7e7bf65a..b30cdc38ac 100644 --- a/tensorflow_probability/python/internal/backend/numpy/dtype.py +++ b/tensorflow_probability/python/internal/backend/numpy/dtype.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/errors.py b/tensorflow_probability/python/internal/backend/numpy/errors.py index d9221d2ea8..762bce68c2 100644 --- a/tensorflow_probability/python/internal/backend/numpy/errors.py +++ b/tensorflow_probability/python/internal/backend/numpy/errors.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf __all__ = [ diff --git a/tensorflow_probability/python/internal/backend/numpy/functional_ops.py b/tensorflow_probability/python/internal/backend/numpy/functional_ops.py index f1ede55bea..6d71c02580 100644 --- 
a/tensorflow_probability/python/internal/backend/numpy/functional_ops.py +++ b/tensorflow_probability/python/internal/backend/numpy/functional_ops.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/gen_linear_operators.py b/tensorflow_probability/python/internal/backend/numpy/gen_linear_operators.py index 6c9b11202d..8eb9943b84 100644 --- a/tensorflow_probability/python/internal/backend/numpy/gen_linear_operators.py +++ b/tensorflow_probability/python/internal/backend/numpy/gen_linear_operators.py @@ -18,14 +18,18 @@ from __future__ import division from __future__ import print_function +import sys +if not sys.path[0].endswith('.runfiles'): + sys.path.pop(0) + +# pylint: disable=g-import-not-at-top,g-bad-import-order import importlib import inspect import re -# Dependency imports - from absl import app from absl import flags +# pylint: enable=g-import-not-at-top,g-bad-import-order FLAGS = flags.FLAGS diff --git a/tensorflow_probability/python/internal/backend/numpy/initializers.py b/tensorflow_probability/python/internal/backend/numpy/initializers.py index 74e927cee2..5323678a2f 100644 --- a/tensorflow_probability/python/internal/backend/numpy/initializers.py +++ b/tensorflow_probability/python/internal/backend/numpy/initializers.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils @@ -31,7 +32,7 @@ ] constant = utils.copy_docstring( - tf.compat.v1.initializers.constant, + tf1.initializers.constant, lambda value=0, dtype=tf.dtypes.float32, verify_shape=False: ( # pylint: disable=g-long-lambda lambda shape, dtype=None, partition_info=None, verify_shape=None: ( # pylint: disable=g-long-lambda np.ones(shape, dtype=dtype) * value)) diff --git a/tensorflow_probability/python/internal/backend/numpy/linalg_impl.py b/tensorflow_probability/python/internal/backend/numpy/linalg_impl.py index 225cb98051..b995d11fdc 100644 --- a/tensorflow_probability/python/internal/backend/numpy/linalg_impl.py +++ b/tensorflow_probability/python/internal/backend/numpy/linalg_impl.py @@ -22,7 +22,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/misc.py b/tensorflow_probability/python/internal/backend/numpy/misc.py index e62eaa413f..7357b910be 100644 --- a/tensorflow_probability/python/internal/backend/numpy/misc.py +++ b/tensorflow_probability/python/internal/backend/numpy/misc.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy.ops import is_tensor diff --git a/tensorflow_probability/python/internal/backend/numpy/nn.py b/tensorflow_probability/python/internal/backend/numpy/nn.py index b41ae2c988..fae165653f 100644 --- a/tensorflow_probability/python/internal/backend/numpy/nn.py +++ b/tensorflow_probability/python/internal/backend/numpy/nn.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as 
tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy import numpy_array diff --git a/tensorflow_probability/python/internal/backend/numpy/numpy_array.py b/tensorflow_probability/python/internal/backend/numpy/numpy_array.py index 6b799949c4..e7dd382106 100644 --- a/tensorflow_probability/python/internal/backend/numpy/numpy_array.py +++ b/tensorflow_probability/python/internal/backend/numpy/numpy_array.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy import ops @@ -313,7 +314,7 @@ def _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-buil lambda value, num=None, axis=0, name=None: np.split(value, num, axis)) where = utils.copy_docstring( - tf.compat.v1.where, + tf1.where, lambda condition, x=None, y=None, name=None: np.where(condition, x, y)) zeros = utils.copy_docstring( diff --git a/tensorflow_probability/python/internal/backend/numpy/numpy_logging.py b/tensorflow_probability/python/internal/backend/numpy/numpy_logging.py index 2a0324fc10..c20b5a98f3 100644 --- a/tensorflow_probability/python/internal/backend/numpy/numpy_logging.py +++ b/tensorflow_probability/python/internal/backend/numpy/numpy_logging.py @@ -20,7 +20,8 @@ # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils @@ -112,67 +113,67 @@ def _warning(*_, **__): # pylint: disable=unused-argument # --- Begin Public Functions -------------------------------------------------- TaskLevelStatusMessage = utils.copy_docstring( # pylint: disable=invalid-name - tf.compat.v1.logging.TaskLevelStatusMessage, + tf1.logging.TaskLevelStatusMessage, _TaskLevelStatusMessage) debug = utils.copy_docstring( - tf.compat.v1.logging.debug, + tf1.logging.debug, _debug) error = utils.copy_docstring( - tf.compat.v1.logging.error, + tf1.logging.error, _error) fatal = utils.copy_docstring( - tf.compat.v1.logging.fatal, + tf1.logging.fatal, _fatal) flush = utils.copy_docstring( - tf.compat.v1.logging.flush, + tf1.logging.flush, _flush) get_verbosity = utils.copy_docstring( - tf.compat.v1.logging.get_verbosity, + tf1.logging.get_verbosity, _get_verbosity) info = utils.copy_docstring( - tf.compat.v1.logging.info, + tf1.logging.info, _info) log = utils.copy_docstring( - tf.compat.v1.logging.log, + tf1.logging.log, _log) log_every_n = utils.copy_docstring( - tf.compat.v1.logging.log_every_n, + tf1.logging.log_every_n, _log_every_n) log_first_n = utils.copy_docstring( - tf.compat.v1.logging.log_first_n, + tf1.logging.log_first_n, _log_first_n) log_if = utils.copy_docstring( - tf.compat.v1.logging.log_if, + tf1.logging.log_if, _log_if) set_verbosity = utils.copy_docstring( - tf.compat.v1.logging.set_verbosity, + tf1.logging.set_verbosity, _set_verbosity) vlog = utils.copy_docstring( - tf.compat.v1.logging.vlog, + tf1.logging.vlog, _vlog) warn = utils.copy_docstring( - tf.compat.v1.logging.warn, + tf1.logging.warn, _warn) warning = utils.copy_docstring( - tf.compat.v1.logging.warning, + tf1.logging.warning, _warning) -DEBUG = tf.compat.v1.logging.DEBUG -ERROR = tf.compat.v1.logging.ERROR -FATAL = tf.compat.v1.logging.FATAL -INFO = 
tf.compat.v1.logging.INFO -WARN = tf.compat.v1.logging.WARN +DEBUG = tf1.logging.DEBUG +ERROR = tf1.logging.ERROR +FATAL = tf1.logging.FATAL +INFO = tf1.logging.INFO +WARN = tf1.logging.WARN diff --git a/tensorflow_probability/python/internal/backend/numpy/numpy_math.py b/tensorflow_probability/python/internal/backend/numpy/numpy_math.py index ec4f6a3041..b54ab25248 100644 --- a/tensorflow_probability/python/internal/backend/numpy/numpy_math.py +++ b/tensorflow_probability/python/internal/backend/numpy/numpy_math.py @@ -23,7 +23,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy.numpy_array import _reverse diff --git a/tensorflow_probability/python/internal/backend/numpy/random_generators.py b/tensorflow_probability/python/internal/backend/numpy/random_generators.py index 8eaa99d403..364f2f7530 100644 --- a/tensorflow_probability/python/internal/backend/numpy/random_generators.py +++ b/tensorflow_probability/python/internal/backend/numpy/random_generators.py @@ -23,7 +23,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy.numpy_math import softmax as _softmax diff --git a/tensorflow_probability/python/internal/backend/numpy/sets_lib.py b/tensorflow_probability/python/internal/backend/numpy/sets_lib.py index b4dce87152..8ed29706bc 100644 --- a/tensorflow_probability/python/internal/backend/numpy/sets_lib.py +++ b/tensorflow_probability/python/internal/backend/numpy/sets_lib.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/sparse_lib.py b/tensorflow_probability/python/internal/backend/numpy/sparse_lib.py index 57c09c6dd8..eef6b74eb4 100644 --- a/tensorflow_probability/python/internal/backend/numpy/sparse_lib.py +++ b/tensorflow_probability/python/internal/backend/numpy/sparse_lib.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils diff --git a/tensorflow_probability/python/internal/backend/numpy/v1.py b/tensorflow_probability/python/internal/backend/numpy/v1.py index 74c4ab7ea5..30c4d983d4 100644 --- a/tensorflow_probability/python/internal/backend/numpy/v1.py +++ b/tensorflow_probability/python/internal/backend/numpy/v1.py @@ -23,7 +23,8 @@ import numpy as np import six -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal.backend.numpy import _utils as utils from tensorflow_probability.python.internal.backend.numpy import initializers @@ -195,91 +196,91 @@ def _placeholder_with_default(input, shape, name=None): # pylint: disable=redef assert_equal = utils.copy_docstring( - tf.compat.v1.assert_equal, + tf1.assert_equal, _assert_equal) assert_greater = utils.copy_docstring( - tf.compat.v1.assert_greater, + tf1.assert_greater, _assert_greater) assert_less = utils.copy_docstring( - tf.compat.v1.assert_less, + tf1.assert_less, _assert_less) 
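# As above and below: utils.copy_docstring attaches the docstring of
# the TF symbol (first argument) to the NumPy-backed reimplementation
# (second argument), so the shim documents itself identically to
# TensorFlow while executing pure NumPy.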
assert_rank = utils.copy_docstring( - tf.compat.v1.assert_rank, + tf1.assert_rank, _assert_rank) assert_scalar = utils.copy_docstring( - tf.compat.v1.assert_scalar, + tf1.assert_scalar, _assert_scalar) assert_greater_equal = utils.copy_docstring( - tf.compat.v1.assert_greater_equal, + tf1.assert_greater_equal, _assert_greater_equal) assert_integer = utils.copy_docstring( - tf.compat.v1.assert_integer, + tf1.assert_integer, _assert_integer) assert_less_equal = utils.copy_docstring( - tf.compat.v1.assert_less_equal, + tf1.assert_less_equal, _assert_less_equal) assert_near = utils.copy_docstring( - tf.compat.v1.assert_near, + tf1.assert_near, _assert_near) assert_negative = utils.copy_docstring( - tf.compat.v1.assert_negative, + tf1.assert_negative, _assert_negative) assert_non_negative = utils.copy_docstring( - tf.compat.v1.assert_non_negative, + tf1.assert_non_negative, _assert_non_negative) assert_non_positive = utils.copy_docstring( - tf.compat.v1.assert_non_positive, + tf1.assert_non_positive, _assert_non_positive) assert_none_equal = utils.copy_docstring( - tf.compat.v1.assert_none_equal, + tf1.assert_none_equal, _assert_none_equal) assert_positive = utils.copy_docstring( - tf.compat.v1.assert_positive, + tf1.assert_positive, _assert_positive) assert_proper_iterable = utils.copy_docstring( - tf.compat.v1.assert_proper_iterable, + tf1.assert_proper_iterable, _assert_proper_iterable) assert_rank_at_least = utils.copy_docstring( - tf.compat.v1.assert_rank_at_least, + tf1.assert_rank_at_least, _assert_rank_at_least) assert_rank_in = utils.copy_docstring( - tf.compat.v1.assert_rank_in, + tf1.assert_rank_in, _assert_rank_in) colocate_with = utils.copy_docstring( - tf.compat.v1.colocate_with, + tf1.colocate_with, _colocate_with) get_variable = utils.copy_docstring( - tf.compat.v1.get_variable, + tf1.get_variable, _get_variable) placeholder_with_default = utils.copy_docstring( - tf.compat.v1.placeholder_with_default, + tf1.placeholder_with_default, _placeholder_with_default) global_variables_initializer = utils.copy_docstring( - tf.compat.v1.global_variables_initializer, + tf1.global_variables_initializer, lambda: None) set_random_seed = utils.copy_docstring( - tf.compat.v1.set_random_seed, + tf1.set_random_seed, lambda seed: np.random.seed(seed % (2**32 - 1))) diff --git a/tensorflow_probability/python/internal/distribution_util_test.py b/tensorflow_probability/python/internal/distribution_util_test.py index 378a1597cb..0893ce1e0e 100644 --- a/tensorflow_probability/python/internal/distribution_util_test.py +++ b/tensorflow_probability/python/internal/distribution_util_test.py @@ -23,7 +23,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions import Categorical from tensorflow_probability.python.distributions import Mixture @@ -32,7 +33,7 @@ from tensorflow_probability.python.distributions import Normal from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import tensorshape_util - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -98,7 +99,7 @@ def _make_diag_scale( @test_util.run_all_in_graph_and_eager_modes -class MakeTrilScaleTest(tf.test.TestCase): +class MakeTrilScaleTest(test_case.TestCase): def _testLegalInputs( self, loc=None, shape_hint=None, scale_params=None): @@ -164,7 +165,7 @@ 
def testAssertPositive(self): @test_util.run_all_in_graph_and_eager_modes -class MakeDiagScaleTest(tf.test.TestCase): +class MakeDiagScaleTest(test_case.TestCase): def _testLegalInputs( self, loc=None, shape_hint=None, scale_params=None): @@ -218,7 +219,7 @@ def testAssertPositive(self): @test_util.run_all_in_graph_and_eager_modes -class ShapesFromLocAndScaleTest(tf.test.TestCase): +class ShapesFromLocAndScaleTest(test_case.TestCase): def test_static_loc_static_scale_non_matching_event_size_raises(self): loc = tf.zeros([2, 4]) @@ -243,7 +244,7 @@ def test_static_loc_static_scale(self): def test_static_loc_dynamic_scale(self): loc = tf.zeros([2, 3]) - diag = tf.compat.v1.placeholder_with_default(np.ones([5, 1, 3]), shape=None) + diag = tf1.placeholder_with_default(np.ones([5, 1, 3]), shape=None) batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale( loc, tf.linalg.LinearOperatorDiag(diag)) @@ -259,7 +260,7 @@ def test_static_loc_dynamic_scale(self): self.assertAllEqual([3], event_shape_) def test_dynamic_loc_static_scale(self): - loc = tf.compat.v1.placeholder_with_default(np.zeros([2, 3]), shape=None) + loc = tf1.placeholder_with_default(np.zeros([2, 3]), shape=None) diag = tf.ones([5, 2, 3]) batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale( loc, tf.linalg.LinearOperatorDiag(diag)) @@ -276,8 +277,8 @@ def test_dynamic_loc_static_scale(self): self.assertAllEqual([3], event_shape_) def test_dynamic_loc_dynamic_scale(self): - loc = tf.compat.v1.placeholder_with_default(np.ones([2, 3]), shape=None) - diag = tf.compat.v1.placeholder_with_default(np.ones([5, 2, 3]), shape=None) + loc = tf1.placeholder_with_default(np.ones([2, 3]), shape=None) + diag = tf1.placeholder_with_default(np.ones([5, 2, 3]), shape=None) batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale( loc, tf.linalg.LinearOperatorDiag(diag)) @@ -305,7 +306,7 @@ def test_none_loc_static_scale(self): def test_none_loc_dynamic_scale(self): loc = None - diag = tf.compat.v1.placeholder_with_default(np.ones([5, 1, 3]), shape=None) + diag = tf1.placeholder_with_default(np.ones([5, 1, 3]), shape=None) batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale( loc, tf.linalg.LinearOperatorDiag(diag)) @@ -319,7 +320,7 @@ def test_none_loc_dynamic_scale(self): @test_util.run_all_in_graph_and_eager_modes -class GetBroadcastShapeTest(tf.test.TestCase): +class GetBroadcastShapeTest(test_case.TestCase): def test_all_static_shapes_work(self): x = tf.ones((2, 1, 3)) @@ -331,7 +332,7 @@ def test_all_static_shapes_work(self): def test_with_some_dynamic_shapes_works(self): if tf.executing_eagerly(): return x = tf.ones([2, 1, 3]) - y = tf.compat.v1.placeholder_with_default( + y = tf1.placeholder_with_default( np.ones([1, 5, 3], dtype=np.float32), shape=None) z = tf.ones([]) @@ -340,7 +341,7 @@ def test_with_some_dynamic_shapes_works(self): @test_util.run_all_in_graph_and_eager_modes -class MixtureStddevTest(tf.test.TestCase): +class MixtureStddevTest(test_case.TestCase): def test_mixture_dev(self): mixture_weights = np.array([ @@ -375,7 +376,7 @@ def test_mixture_dev(self): @test_util.run_all_in_graph_and_eager_modes -class PadMixtureDimensionsTest(tf.test.TestCase): +class PadMixtureDimensionsTest(test_case.TestCase): def test_pad_mixture_dimensions_mixture(self): gm = Mixture( @@ -416,14 +417,14 @@ def testNegAxisCorrectness(self): value_ = np.float32(0.25) count_ = np.int32(2) - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( x_, shape=x_.shape if 
self.is_static_shape else None) value = ( tf.constant(value_) if self.is_static_shape else - tf.compat.v1.placeholder_with_default(value_, shape=None)) + tf1.placeholder_with_default(value_, shape=None)) count = ( tf.constant(count_) if self.is_static_shape else - tf.compat.v1.placeholder_with_default(count_, shape=None)) + tf1.placeholder_with_default(count_, shape=None)) x0_front = distribution_util.pad( x, axis=-2, value=value, count=count, front=True) @@ -464,14 +465,14 @@ def testPosAxisCorrectness(self): [4, 5, 6]]) value_ = np.float32(0.25) count_ = np.int32(2) - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( x_, shape=x_.shape if self.is_static_shape else None) value = ( tf.constant(value_) if self.is_static_shape else - tf.compat.v1.placeholder_with_default(value_, shape=None)) + tf1.placeholder_with_default(value_, shape=None)) count = ( tf.constant(count_) if self.is_static_shape else - tf.compat.v1.placeholder_with_default(count_, shape=None)) + tf1.placeholder_with_default(count_, shape=None)) x1_front = distribution_util.pad( x, axis=1, value=value, count=count, front=True) @@ -503,7 +504,7 @@ def testPosAxisCorrectness(self): @test_util.run_all_in_graph_and_eager_modes -class PadStaticTest(_PadTest, tf.test.TestCase): +class PadStaticTest(_PadTest, test_case.TestCase): @property def is_static_shape(self): @@ -511,7 +512,7 @@ def is_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class PadDynamicTest(_PadTest, tf.test.TestCase): +class PadDynamicTest(_PadTest, test_case.TestCase): @property def is_static_shape(self): @@ -519,7 +520,7 @@ def is_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class PickScalarConditionTest(tf.test.TestCase): +class PickScalarConditionTest(test_case.TestCase): def test_pick_scalar_condition_static(self): @@ -544,8 +545,8 @@ def test_pick_scalar_condition_dynamic(self): neg = -np.exp(np.random.randn(3, 2, 4)).astype(np.float32) # TF dynamic cond - dynamic_true = tf.compat.v1.placeholder_with_default(input=True, shape=None) - dynamic_false = tf.compat.v1.placeholder_with_default( + dynamic_true = tf1.placeholder_with_default(input=True, shape=None) + dynamic_false = tf1.placeholder_with_default( input=False, shape=None) pos_ = self.evaluate(distribution_util.pick_scalar_condition( dynamic_true, pos, neg)) @@ -555,8 +556,8 @@ def test_pick_scalar_condition_dynamic(self): self.assertAllEqual(neg_, neg) # TF dynamic everything - pos_dynamic = tf.compat.v1.placeholder_with_default(input=pos, shape=None) - neg_dynamic = tf.compat.v1.placeholder_with_default(input=neg, shape=None) + pos_dynamic = tf1.placeholder_with_default(input=pos, shape=None) + neg_dynamic = tf1.placeholder_with_default(input=neg, shape=None) pos_ = self.evaluate(distribution_util.pick_scalar_condition( dynamic_true, pos_dynamic, neg_dynamic)) neg_ = self.evaluate(distribution_util.pick_scalar_condition( @@ -566,7 +567,7 @@ def test_pick_scalar_condition_dynamic(self): @test_util.run_all_in_graph_and_eager_modes -class TestMoveDimension(tf.test.TestCase): +class TestMoveDimension(test_case.TestCase): def test_move_dimension_static_shape(self): @@ -591,7 +592,7 @@ def test_move_dimension_static_shape(self): def test_move_dimension_dynamic_shape(self): x_ = tf.random.normal(shape=[200, 30, 4, 1, 6]) - x = tf.compat.v1.placeholder_with_default(input=x_, shape=None) + x = tf1.placeholder_with_default(input=x_, shape=None) x_perm1 = distribution_util.move_dimension(x, 1, 1) x_perm2 = distribution_util.move_dimension(x, 0, 
3) @@ -620,27 +621,27 @@ def test_move_dimension_dynamic_shape(self): def test_move_dimension_dynamic_indices(self): x_ = tf.random.normal(shape=[200, 30, 4, 1, 6]) - x = tf.compat.v1.placeholder_with_default(input=x_, shape=None) + x = tf1.placeholder_with_default(input=x_, shape=None) x_perm1 = distribution_util.move_dimension( - x, tf.compat.v1.placeholder_with_default(input=1, shape=[]), - tf.compat.v1.placeholder_with_default(input=1, shape=[])) + x, tf1.placeholder_with_default(input=1, shape=[]), + tf1.placeholder_with_default(input=1, shape=[])) x_perm2 = distribution_util.move_dimension( - x, tf.compat.v1.placeholder_with_default(input=0, shape=[]), - tf.compat.v1.placeholder_with_default(input=3, shape=[])) + x, tf1.placeholder_with_default(input=0, shape=[]), + tf1.placeholder_with_default(input=3, shape=[])) x_perm3 = distribution_util.move_dimension( - x, tf.compat.v1.placeholder_with_default(input=0, shape=[]), - tf.compat.v1.placeholder_with_default(input=-2, shape=[])) + x, tf1.placeholder_with_default(input=0, shape=[]), + tf1.placeholder_with_default(input=-2, shape=[])) x_perm4 = distribution_util.move_dimension( - x, tf.compat.v1.placeholder_with_default(input=4, shape=[]), - tf.compat.v1.placeholder_with_default(input=2, shape=[])) + x, tf1.placeholder_with_default(input=4, shape=[]), + tf1.placeholder_with_default(input=2, shape=[])) x_perm5 = distribution_util.move_dimension( - x, tf.compat.v1.placeholder_with_default(input=-1, shape=[]), - tf.compat.v1.placeholder_with_default(input=2, shape=[])) + x, tf1.placeholder_with_default(input=-1, shape=[]), + tf1.placeholder_with_default(input=2, shape=[])) x_perm1_, x_perm2_, x_perm3_, x_perm4_, x_perm5_ = self.evaluate([ tf.shape(input=x_perm1), @@ -662,19 +663,19 @@ def test_move_dimension_dynamic_indices(self): @test_util.run_all_in_graph_and_eager_modes -class AssertCloseTest(tf.test.TestCase): +class AssertCloseTest(test_case.TestCase): def testAssertIntegerForm(self): # This should only be detected as an integer. - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1., 5, 10, 15, 20], dtype=np.float32), shape=None) - y = tf.compat.v1.placeholder_with_default( + y = tf1.placeholder_with_default( np.array([1.1, 5, 10, 15, 20], dtype=np.float32), shape=None) # First component isn't less than float32.eps = 1e-7 - z = tf.compat.v1.placeholder_with_default( + z = tf1.placeholder_with_default( np.array([1.0001, 5, 10, 15, 20], dtype=np.float32), shape=None) # This shouldn't be detected as an integer. 
- w = tf.compat.v1.placeholder_with_default( + w = tf1.placeholder_with_default( np.array([1e-8, 5, 10, 15, 20], dtype=np.float32), shape=None) with tf.control_dependencies([distribution_util.assert_integer_form(x)]): @@ -697,7 +698,7 @@ def testAssertIntegerForm(self): @test_util.run_all_in_graph_and_eager_modes -class MaybeGetStaticTest(tf.test.TestCase): +class MaybeGetStaticTest(test_case.TestCase): def testGetStaticInt(self): x = 2 @@ -723,7 +724,7 @@ def testGetStaticConstant(self): def testGetStaticPlaceholder(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([2.], dtype=np.int32), shape=[1]) self.assertEqual(None, distribution_util.maybe_get_static_value(x)) self.assertEqual( @@ -731,7 +732,7 @@ def testGetStaticPlaceholder(self): @test_util.run_all_in_graph_and_eager_modes -class GetLogitsAndProbsTest(tf.test.TestCase): +class GetLogitsAndProbsTest(test_case.TestCase): def testImproperArguments(self): with self.assertRaises(ValueError): @@ -859,7 +860,7 @@ def testProbsMultidimShape(self): with self.assertRaisesOpError( 'Number of classes exceeds `dtype` precision'): p = np.ones([int(2**11+1)], dtype=np.float16) - p = tf.compat.v1.placeholder_with_default(p, shape=None) + p = tf1.placeholder_with_default(p, shape=None) self.evaluate(distribution_util.get_logits_and_probs( probs=p, multidimensional=True, validate_args=True)) @@ -874,14 +875,14 @@ def testLogitsMultidimShape(self): with self.assertRaisesOpError( 'Number of classes exceeds `dtype` precision'): l = np.ones([int(2**11+1)], dtype=np.float16) - l = tf.compat.v1.placeholder_with_default(l, shape=None) + l = tf1.placeholder_with_default(l, shape=None) logit, _ = distribution_util.get_logits_and_probs( logits=l, multidimensional=True, validate_args=True) self.evaluate(logit) @test_util.run_all_in_graph_and_eager_modes -class EmbedCheckCategoricalEventShapeTest(tf.test.TestCase): +class EmbedCheckCategoricalEventShapeTest(test_case.TestCase): def testTooSmall(self): with self.assertRaises(ValueError): @@ -892,7 +893,7 @@ def testTooSmall(self): if tf.executing_eagerly(): return with self.assertRaisesOpError( 'must have at least 2 events'): - param = tf.compat.v1.placeholder_with_default( + param = tf1.placeholder_with_default( np.ones([1], dtype=np.float16), shape=None) checked_param = distribution_util.embed_check_categorical_event_shape( param) @@ -907,7 +908,7 @@ def testTooLarge(self): if tf.executing_eagerly(): return with self.assertRaisesOpError( 'Number of classes exceeds `dtype` precision'): - param = tf.compat.v1.placeholder_with_default( + param = tf1.placeholder_with_default( np.ones([int(2**11+1)], dtype=np.float16), shape=None) checked_param = distribution_util.embed_check_categorical_event_shape( param) @@ -922,11 +923,11 @@ def testUnsupportedDtype(self): @test_util.run_all_in_graph_and_eager_modes -class EmbedCheckIntegerCastingClosedTest(tf.test.TestCase): +class EmbedCheckIntegerCastingClosedTest(test_case.TestCase): def testCorrectlyAssertsNonnegative(self): with self.assertRaisesOpError('Elements must be non-negative'): - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1, -1], dtype=np.float16), shape=None) x_checked = distribution_util.embed_check_integer_casting_closed( x, target_dtype=tf.int16) @@ -934,7 +935,7 @@ def testCorrectlyAssertsNonnegative(self): def testCorrectlyAssertsPositive(self): with self.assertRaisesOpError('Elements must be positive'): - x = 
tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1, 0], dtype=np.float16), shape=None) x_checked = distribution_util.embed_check_integer_casting_closed( x, target_dtype=tf.int16, assert_positive=True) @@ -942,7 +943,7 @@ def testCorrectlyAssertsPositive(self): def testCorrectlyAssersIntegerForm(self): with self.assertRaisesOpError('Elements must be int16-equivalent.'): - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1, 1.5], dtype=np.float16), shape=None) x_checked = distribution_util.embed_check_integer_casting_closed( x, target_dtype=tf.int16) @@ -950,7 +951,7 @@ def testCorrectlyAssersIntegerForm(self): def testCorrectlyAssertsLargestPossibleInteger(self): with self.assertRaisesOpError('Elements cannot exceed 32767.'): - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1, 2**15], dtype=np.int32), shape=None) x_checked = distribution_util.embed_check_integer_casting_closed( x, target_dtype=tf.int16) @@ -958,7 +959,7 @@ def testCorrectlyAssertsLargestPossibleInteger(self): def testCorrectlyAssertsSmallestPossibleInteger(self): with self.assertRaisesOpError('Elements cannot be smaller than 0.'): - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([1, -1], dtype=np.int32), shape=None) x_checked = distribution_util.embed_check_integer_casting_closed( x, target_dtype=tf.uint16, assert_nonnegative=False) @@ -966,27 +967,27 @@ def testCorrectlyAssertsSmallestPossibleInteger(self): @test_util.run_all_in_graph_and_eager_modes -class DynamicShapeTest(tf.test.TestCase): +class DynamicShapeTest(test_case.TestCase): def testSameDynamicShape(self): scalar = tf.constant(2.) - scalar1 = tf.compat.v1.placeholder_with_default( + scalar1 = tf1.placeholder_with_default( np.array(2., dtype=np.float32), shape=None) vector = tf.constant([0.3, 0.4, 0.5]) - vector1 = tf.compat.v1.placeholder_with_default( + vector1 = tf1.placeholder_with_default( np.array([2., 3., 4.], dtype=np.float32), shape=[None]) - vector2 = tf.compat.v1.placeholder_with_default( + vector2 = tf1.placeholder_with_default( np.array([2., 3.5, 6.], dtype=np.float32), shape=[None]) multidimensional = tf.constant([[0.3, 0.4], [0.2, 0.6]]) - multidimensional1 = tf.compat.v1.placeholder_with_default( + multidimensional1 = tf1.placeholder_with_default( np.array([[2., 3.], [3., 4.]], dtype=np.float32), shape=[None, None]) - multidimensional2 = tf.compat.v1.placeholder_with_default( + multidimensional2 = tf1.placeholder_with_default( np.array([[1., 3.5], [6.3, 2.3]], dtype=np.float32), shape=[None, None]) - multidimensional3 = tf.compat.v1.placeholder_with_default( + multidimensional3 = tf1.placeholder_with_default( np.array([[1., 3.5, 5.], [6.3, 2.3, 7.1]], dtype=np.float32), shape=[None, None]) @@ -1038,7 +1039,7 @@ def testSameDynamicShape(self): @test_util.run_all_in_graph_and_eager_modes -class RotateTransposeTest(tf.test.TestCase): +class RotateTransposeTest(test_case.TestCase): def _np_rotate_transpose(self, x, shift): if not isinstance(x, np.ndarray): @@ -1065,15 +1066,15 @@ def testRollDynamic(self): np.ones([2, 1], dtype=np.float32), np.ones([3, 2, 1], dtype=np.float32)): for shift_value in np.arange(-5, 5).astype(np.int32): - x = tf.compat.v1.placeholder_with_default(x_value, shape=None) - shift = tf.compat.v1.placeholder_with_default(shift_value, shape=None) + x = tf1.placeholder_with_default(x_value, shape=None) + shift = tf1.placeholder_with_default(shift_value, shape=None) 
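# placeholder_with_default(value, shape=None) runs with `value` but
# hides the static shape, so the assertion below exercises the
# dynamic-shape path of rotate_transpose under both graph and eager
# execution.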
self.assertAllEqual( self._np_rotate_transpose(x_value, shift_value), self.evaluate(distribution_util.rotate_transpose(x, shift))) @test_util.run_all_in_graph_and_eager_modes -class PickVectorTest(tf.test.TestCase): +class PickVectorTest(test_case.TestCase): def testCorrectlyPicksVector(self): x = np.arange(10, 12) @@ -1091,7 +1092,7 @@ def testCorrectlyPicksVector(self): @test_util.run_all_in_graph_and_eager_modes -class PreferStaticRankTest(tf.test.TestCase): +class PreferStaticRankTest(test_case.TestCase): def testNonEmptyConstantTensor(self): x = tf.zeros([2, 3, 4]) @@ -1116,28 +1117,28 @@ def testScalarTensor(self): def testDynamicRankEndsUpBeingNonEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.zeros([2, 3], dtype=np.float64), shape=None) rank = distribution_util.prefer_static_rank(x) self.assertAllEqual(2, self.evaluate(rank)) def testDynamicRankEndsUpBeingEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([], dtype=np.int32), shape=None) rank = distribution_util.prefer_static_rank(x) self.assertAllEqual(1, self.evaluate(rank)) def testDynamicRankEndsUpBeingScalar(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array(1, dtype=np.int32), shape=None) rank = distribution_util.prefer_static_rank(x) self.assertAllEqual(0, self.evaluate(rank)) @test_util.run_all_in_graph_and_eager_modes -class PreferStaticShapeTest(tf.test.TestCase): +class PreferStaticShapeTest(test_case.TestCase): def testNonEmptyConstantTensor(self): x = tf.zeros((2, 3, 4)) @@ -1159,28 +1160,28 @@ def testScalarTensor(self): def testDynamicShapeEndsUpBeingNonEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.zeros([2, 3], dtype=np.float64), shape=None) shape = distribution_util.prefer_static_shape(x) self.assertAllEqual([2, 3], self.evaluate(shape)) def testDynamicShapeEndsUpBeingEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([], dtype=np.int32), shape=None) shape = distribution_util.prefer_static_shape(x) self.assertAllEqual([0], self.evaluate(shape)) def testDynamicShapeEndsUpBeingScalar(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array(1, dtype=np.int32), shape=None) shape = distribution_util.prefer_static_shape(x) self.assertAllEqual([], self.evaluate(shape)) @test_util.run_all_in_graph_and_eager_modes -class PreferStaticValueTest(tf.test.TestCase): +class PreferStaticValueTest(test_case.TestCase): def testNonEmptyConstantTensor(self): x = tf.zeros((2, 3, 4)) @@ -1203,7 +1204,7 @@ def testScalarTensor(self): def testDynamicValueEndsUpBeingNonEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.zeros((2, 3), dtype=np.float64), shape=None) value = distribution_util.prefer_static_value(x) self.assertAllEqual(np.zeros((2, 3)), @@ -1211,21 +1212,21 @@ def testDynamicValueEndsUpBeingNonEmpty(self): def testDynamicValueEndsUpBeingEmpty(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array([], dtype=np.int32), shape=None) value = distribution_util.prefer_static_value(x) 
self.assertAllEqual(np.array([]), self.evaluate(value)) def testDynamicValueEndsUpBeingScalar(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.array(1, dtype=np.int32), shape=None) value = distribution_util.prefer_static_value(x) self.assertAllEqual(np.array(1), self.evaluate(value)) # No need for eager tests; this function doesn't depend on TF. -class GenNewSeedTest(tf.test.TestCase): +class GenNewSeedTest(test_case.TestCase): def testOnlyNoneReturnsNone(self): self.assertIsNotNone(distribution_util.gen_new_seed(0, 'salt')) @@ -1233,7 +1234,7 @@ def testOnlyNoneReturnsNone(self): @test_util.run_all_in_graph_and_eager_modes -class ArgumentsTest(tf.test.TestCase): +class ArgumentsTest(test_case.TestCase): def testNoArguments(self): def foo(): @@ -1291,7 +1292,7 @@ def foo(a, b, c, *varargs, **kwargs): # pylint: disable=unused-argument @test_util.run_all_in_graph_and_eager_modes -class ExpandToVectorTest(tf.test.TestCase): +class ExpandToVectorTest(test_case.TestCase): def _check_static(self, expected, actual, dtype=np.int32): const_actual = tf.get_static_value(actual) @@ -1356,7 +1357,7 @@ def test_expand_to_vector_on_tensors(self): # Helper to construct a placeholder and call expand_to_tensor on it. def _expand_tensor(x, shape=None, dtype=np.int32, validate_args=False): return distribution_util.expand_to_vector( - tf.compat.v1.placeholder_with_default( + tf1.placeholder_with_default( np.array(x, dtype=dtype), shape=shape), tensor_name='name_for_tensor', validate_args=validate_args) @@ -1394,24 +1395,24 @@ def _expand_tensor(x, shape=None, dtype=np.int32, validate_args=False): class WithDependenciesTestCase(test_util.TensorFlowTestCase): def testTupleDependencies(self): - counter = tf.compat.v2.Variable(0, name='my_counter') + counter = tf.Variable(0, name='my_counter') const_with_dep = distribution_util.with_dependencies( - (tf.compat.v1.assign_add(counter, 1), tf.constant(42)), + (tf1.assign_add(counter, 1), tf.constant(42)), tf.constant(7)) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) self.assertEqual(1 if tf.executing_eagerly() else 0, self.evaluate(counter)) self.assertEqual(7, self.evaluate(const_with_dep)) self.assertEqual(1, self.evaluate(counter)) def testListDependencies(self): - counter = tf.compat.v2.Variable(0, name='my_counter') + counter = tf.Variable(0, name='my_counter') const_with_dep = distribution_util.with_dependencies( - [tf.compat.v1.assign_add(counter, 1), tf.constant(42)], + [tf1.assign_add(counter, 1), tf.constant(42)], tf.constant(7)) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) self.assertEqual(1 if tf.executing_eagerly() else 0, self.evaluate(counter)) self.assertEqual(7, self.evaluate(const_with_dep)) diff --git a/tensorflow_probability/python/internal/docstring_util_test.py b/tensorflow_probability/python/internal/docstring_util_test.py index 406ebe6676..9ecced6c31 100644 --- a/tensorflow_probability/python/internal/docstring_util_test.py +++ b/tensorflow_probability/python/internal/docstring_util_test.py @@ -18,12 +18,13 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import docstring_util +from tensorflow_probability.python.internal import test_case -class DocstringUtil(tf.test.TestCase): +class 
DocstringUtil(test_case.TestCase): def _testFunction(self): doc_args = """x: Input to return as output. diff --git a/tensorflow_probability/python/internal/dtype_util_test.py b/tensorflow_probability/python/internal/dtype_util_test.py index 8df9514bca..089258530c 100644 --- a/tensorflow_probability/python/internal/dtype_util_test.py +++ b/tensorflow_probability/python/internal/dtype_util_test.py @@ -27,10 +27,11 @@ import tensorflow_probability as tfp from tensorflow_probability.python.internal import dtype_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -class DtypeUtilTest(tf.test.TestCase): +class DtypeUtilTest(test_case.TestCase): def testIsInteger(self): self.assertFalse(dtype_util.is_integer(np.float64)) @@ -70,7 +71,7 @@ def testCommonDtypeFromEdRV(self): tf.float64, dtype_util.common_dtype([x], dtype_hint=tf.float32)) -class FloatDTypeTest(tf.test.TestCase): +class FloatDTypeTest(test_case.TestCase): @test_util.run_in_graph_and_eager_modes def test_assert_same_float_dtype(self): diff --git a/tensorflow_probability/python/internal/hypothesis_testlib_test.py b/tensorflow_probability/python/internal/hypothesis_testlib_test.py index 97bea9f13a..6ff3d07a11 100644 --- a/tensorflow_probability/python/internal/hypothesis_testlib_test.py +++ b/tensorflow_probability/python/internal/hypothesis_testlib_test.py @@ -25,14 +25,15 @@ import hypothesis as hp from hypothesis import strategies as hps import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class HypothesisTestlibTest(parameterized.TestCase, tf.test.TestCase): +class HypothesisTestlibTest(parameterized.TestCase, test_case.TestCase): @parameterized.parameters((support,) for support in tfp_hps.ALL_SUPPORTS) @hp.given(hps.data()) diff --git a/tensorflow_probability/python/internal/nest_util_test.py b/tensorflow_probability/python/internal/nest_util_test.py index 9b60bf076c..1afe417d4d 100644 --- a/tensorflow_probability/python/internal/nest_util_test.py +++ b/tensorflow_probability/python/internal/nest_util_test.py @@ -24,9 +24,10 @@ from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import nest_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -63,7 +64,7 @@ class LeafNamedTuple( @test_util.run_all_in_graph_and_eager_modes -class NestUtilTest(tf.test.TestCase, parameterized.TestCase): +class NestUtilTest(test_case.TestCase, parameterized.TestCase): @parameterized.parameters((1, [2, 2], [1, 1]), ([1], [2, 2], [1, 1]), diff --git a/tensorflow_probability/python/internal/special_math_test.py b/tensorflow_probability/python/internal/special_math_test.py index 95a7602fd8..0e75bc81b5 100644 --- a/tensorflow_probability/python/internal/special_math_test.py +++ b/tensorflow_probability/python/internal/special_math_test.py @@ -25,11 +25,12 @@ from scipy import special as sp_special from scipy import stats as sp_stats -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf 
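# A quick sketch of the import convention this change rolls out everywhere
# (assuming a TensorFlow build that ships both compat surfaces): 1.x-only
# symbols are reached through an explicit `tf1` alias, and plain `tf` is
# reserved for the 2.x API.
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf

tf1.set_random_seed(42)        # the 1.x spelling lives on the tf1 alias
with tf.name_scope('demo'):    # the 2.x name_scope takes only a name
  total = tf.reduce_sum(tf.ones([2, 3]))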
from tensorflow_probability.python.internal import special_math +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.gradient import value_and_gradient - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -56,7 +57,7 @@ def _make_grid(dtype, grid_spec): @test_util.run_all_in_graph_and_eager_modes -class NdtriTest(tf.test.TestCase): +class NdtriTest(test_case.TestCase): def assertAllFinite(self, x): is_finite = np.isfinite(x) @@ -78,7 +79,7 @@ def testNdtri(self): def testNdtriDynamicShape(self): """Verifies that ndtri computation is correct.""" p_ = np.linspace(0., 1., 50).astype(np.float32) - p = tf.compat.v1.placeholder_with_default(p_, shape=None) + p = tf1.placeholder_with_default(p_, shape=None) self.assertAllClose(sp_special.ndtri(p_), self.evaluate(special_math.ndtri(p)), atol=0.) @@ -108,7 +109,7 @@ def testNdtriFiniteGradientFloat64(self): @test_util.run_all_in_graph_and_eager_modes -class NdtrTest(tf.test.TestCase): +class NdtrTest(test_case.TestCase): _use_log = False # Grid min/max chosen to ensure 0 < cdf(x) < 1. _grid32 = GridSpec(min=-12.9, max=5., shape=[100]) @@ -224,7 +225,7 @@ class LogNdtrTestUpper(NdtrTest): @test_util.run_all_in_graph_and_eager_modes -class NdtrGradientTest(tf.test.TestCase): +class NdtrGradientTest(test_case.TestCase): _use_log = False _grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8]) _error32 = ErrorSpec(rtol=1e-4, atol=0) @@ -299,7 +300,7 @@ class LogNdtrGradientTest(NdtrGradientTest): @test_util.run_all_in_graph_and_eager_modes -class ErfInvTest(tf.test.TestCase): +class ErfInvTest(test_case.TestCase): def testErfInvValues(self): x = np.linspace(0., 1., 50).astype(np.float64) @@ -318,7 +319,7 @@ def testErfInvIntegerInput(self): @test_util.run_all_in_graph_and_eager_modes -class LogCDFLaplaceTest(tf.test.TestCase): +class LogCDFLaplaceTest(test_case.TestCase): # Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot # rely on scipy to cross check the extreme values. 
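# Why the comment above rules out scipy: log(laplace.cdf(x)) underflows to
# log(0) in the far left tail, whereas the Laplace log-CDF has a closed form
# that stays exact there. A numpy sketch for loc=0, scale=1 (illustrative
# only, not the library implementation):
import numpy as np

def laplace_log_cdf(x):
  # x <= 0: cdf = 0.5 * exp(x), so log cdf = log(0.5) + x, stable at x = -1e6
  # x  > 0: cdf = 1 - 0.5 * exp(-x), so log cdf = log1p(-0.5 * exp(-x))
  return np.where(x <= 0.,
                  np.log(0.5) + x,
                  np.log1p(-0.5 * np.exp(-np.maximum(x, 0.))))

laplace_log_cdf(np.array([-1e6, 0., 30.]))  # finite at every point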
diff --git a/tensorflow_probability/python/internal/test_case_test.py b/tensorflow_probability/python/internal/test_case_test.py index 3a52ce71f6..b3e0426721 100644 --- a/tensorflow_probability/python/internal/test_case_test.py +++ b/tensorflow_probability/python/internal/test_case_test.py @@ -20,7 +20,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import test_case @@ -108,7 +109,7 @@ def test_assert_all_nan_input_inf(self): def test_assert_all_nan_input_placeholder_with_default(self): all_nan = np.full((10, 10, 10), np.nan).astype(dtype_util.as_numpy_dtype(self.dtype)) - a = tf.compat.v1.placeholder_with_default( + a = tf1.placeholder_with_default( input=all_nan, shape=all_nan.shape) self.assertAllNan(a) diff --git a/tensorflow_probability/python/internal/test_util.py b/tensorflow_probability/python/internal/test_util.py index 6408789732..a307bd69bf 100644 --- a/tensorflow_probability/python/internal/test_util.py +++ b/tensorflow_probability/python/internal/test_util.py @@ -21,13 +21,13 @@ import os # Dependency imports + from absl import flags from absl import logging import numpy as np import six - -import tensorflow as tf - +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal.backend.numpy import ops from tensorflow_probability.python.util.seed_stream import SeedStream @@ -124,7 +124,7 @@ def test_seed(hardcoded_seed=None, set_eager_seed=True): `--fixed_seed` takes precedence over `--vary_seed` when both are present. Note that TensorFlow graph mode operations tend to read seed state from two - sources: a "graph-level seed" and an "op-level seed". tf.test.TestCase will + sources: a "graph-level seed" and an "op-level seed". test_case.TestCase will set the former to a fixed value per test, but in general it may be necessary to explicitly set both to ensure reproducibility. @@ -160,7 +160,7 @@ def _wrap_seed(seed, set_eager_seed): # TODO(b/68017812): Remove this clause once eager correctly supports seeding. if tf.executing_eagerly() and set_eager_seed: - tf.compat.v1.set_random_seed(seed) + tf1.set_random_seed(seed) return seed @@ -190,7 +190,7 @@ def test_seed_stream(salt='Salt of the Earth', hardcoded_seed=None): `--vary_seed` when both are present. Note that TensorFlow graph mode operations tend to read seed state from two - sources: a "graph-level seed" and an "op-level seed". tf.test.TestCase will + sources: a "graph-level seed" and an "op-level seed". test_case.TestCase will set the former to a fixed value per test, but in general it may be necessary to explicitly set both to ensure reproducibility. @@ -346,7 +346,7 @@ def histogram(self, x, value_range=None, nbins=None, name=None): `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`. edges: 1D `Tensor` characterizing intervals used for counting. """ - with tf.compat.v2.name_scope(name or 'histogram'): + with tf.name_scope(name or 'histogram'): x = tf.convert_to_tensor(value=x, name='x') if value_range is None: value_range = [ @@ -461,13 +461,13 @@ def monte_carlo_hypersphere_volume(dist, num_samples, radius, center): x = dist.sample(num_samples, seed=seed) x = tf.identity(x) # Invalidate bijector caching.
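# The surrounding helper estimates a ball's volume by importance sampling:
# E_p[ indicator(||x - center|| <= radius) / p(x) ] is the integral of 1 over
# the ball, i.e. its volume. The same identity in plain numpy (the proposal,
# sizes, and seed here are made up for illustration):
import numpy as np

rng = np.random.RandomState(42)
dims, radius, n = 2, 1., 1000000
x = rng.randn(n, dims)                    # proposal: standard normal
log_p = -0.5 * (np.sum(x**2, axis=-1) + dims * np.log(2. * np.pi))
w = np.where(np.linalg.norm(x, axis=-1) <= radius, np.exp(-log_p), 0.)
w.mean()                                  # ~= pi, the area of the unit disk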
inverse_log_prob = tf.exp(-dist.log_prob(x)) - importance_weights = tf.compat.v1.where( + importance_weights = tf1.where( tf.norm(tensor=x - center, axis=-1) <= radius, inverse_log_prob, tf.zeros_like(inverse_log_prob)) return tf.reduce_mean(input_tensor=importance_weights, axis=0) # Build graph. - with tf.compat.v2.name_scope('run_test_sample_consistent_log_prob'): + with tf.name_scope('run_test_sample_consistent_log_prob'): batch_shape = dist.batch_shape_tensor() actual_volume = actual_hypersphere_volume( dims=dist.event_shape_tensor()[0], @@ -477,7 +477,7 @@ def monte_carlo_hypersphere_volume(dist, num_samples, radius, center): num_samples=num_samples, radius=radius, center=center) - init_op = tf.compat.v1.global_variables_initializer() + init_op = tf1.global_variables_initializer() # Execute graph. sess_run_fn(init_op) @@ -562,5 +562,5 @@ def run_test_sample_consistent_mean_covariance( def _vec_outer_square(x, name=None): """Computes the outer-product of a vector, i.e., x.T x.""" - with tf.compat.v2.name_scope(name or 'vec_osquare'): + with tf.name_scope(name or 'vec_osquare'): return x[..., :, tf.newaxis] * x[..., tf.newaxis, :] diff --git a/tensorflow_probability/python/internal/test_util_test.py b/tensorflow_probability/python/internal/test_util_test.py index 7c21d43e65..7e9654a932 100644 --- a/tensorflow_probability/python/internal/test_util_test.py +++ b/tensorflow_probability/python/internal/test_util_test.py @@ -24,10 +24,12 @@ import numpy as np import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow_probability.python.util.seed_stream import SeedStream from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import + FLAGS = flags.FLAGS @@ -38,11 +40,12 @@ def _maybe_jax(x): if JAX_MODE: from jax import random as jaxrand # pylint: disable=g-import-not-at-top x = jaxrand.PRNGKey(x) + return x @test_util.run_all_in_graph_and_eager_modes -class SeedSettingTest(tf.test.TestCase): +class SeedSettingTest(test_case.TestCase): def testTypeCorrectness(self): assert isinstance(tfp_test_util.test_seed_stream(), SeedStream) diff --git a/tensorflow_probability/python/layers/BUILD b/tensorflow_probability/python/layers/BUILD index cbff5fc2cc..2240907d40 100644 --- a/tensorflow_probability/python/layers/BUILD +++ b/tensorflow_probability/python/layers/BUILD @@ -118,6 +118,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -143,6 +144,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -163,6 +165,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -185,6 +188,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -217,6 +221,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -238,6 +243,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/layers:weight_norm", ], ) diff --git a/tensorflow_probability/python/layers/conv_variational.py b/tensorflow_probability/python/layers/conv_variational.py index 
283ebc8d89..705064e6c7 100644 --- a/tensorflow_probability/python/layers/conv_variational.py +++ b/tensorflow_probability/python/layers/conv_variational.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import docstring_util diff --git a/tensorflow_probability/python/layers/conv_variational_test.py b/tensorflow_probability/python/layers/conv_variational_test.py index 9e2a2b688e..3fcf007381 100644 --- a/tensorflow_probability/python/layers/conv_variational_test.py +++ b/tensorflow_probability/python/layers/conv_variational_test.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import test_case @@ -331,7 +332,7 @@ def _testConvSetUp(self, layer_class, batch_size, depth=None, bias_divergence = MockKLDivergence( result=tf.random.uniform([], seed=seed())) - tf.compat.v1.set_random_seed(5995) + tf1.set_random_seed(5995) layer = layer_class( filters=filters, kernel_size=kernel_size, @@ -428,7 +429,7 @@ def _testConvFlipout(self, layer_class): # pylint: disable=invalid-name depth=depth, height=height, width=width, channels=channels, filters=filters, seed=44) - tf.compat.v1.set_random_seed(5995) + tf1.set_random_seed(5995) convolution_op = nn_ops.Convolution( tf.TensorShape(inputs.shape), @@ -615,7 +616,7 @@ def _testLayerInSequential(self, layer_class): # pylint: disable=invalid-name output = net(inputs) # Verify that the network runs without errors - sess.run(tf.compat.v1.global_variables_initializer()) + sess.run(tf1.global_variables_initializer()) sess.run(output) def testKerasLayerConvolution1DReparameterization(self): diff --git a/tensorflow_probability/python/layers/dense_variational.py b/tensorflow_probability/python/layers/dense_variational.py index 0ec2dc125e..f73c02cb9a 100644 --- a/tensorflow_probability/python/layers/dense_variational.py +++ b/tensorflow_probability/python/layers/dense_variational.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import docstring_util diff --git a/tensorflow_probability/python/layers/dense_variational_test.py b/tensorflow_probability/python/layers/dense_variational_test.py index 95e4161dec..7081bb4a16 100644 --- a/tensorflow_probability/python/layers/dense_variational_test.py +++ b/tensorflow_probability/python/layers/dense_variational_test.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import test_case @@ -312,14 +313,14 @@ def testDenseReparameterization(self): def testDenseLocalReparameterization(self): batch_size, in_size, out_size = 2, 3, 4 with self.cached_session() as sess: - tf.compat.v1.set_random_seed(9068) + tf1.set_random_seed(9068) (kernel_posterior, kernel_prior, kernel_divergence, bias_posterior, bias_prior, bias_divergence, layer, inputs, outputs, kl_penalty) = self._testDenseSetUp( tfp.layers.DenseLocalReparameterization, batch_size, in_size, out_size) - 
tf.compat.v1.set_random_seed(9068) + tf1.set_random_seed(9068) expected_kernel_posterior_affine = tfd.Normal( loc=tf.matmul(inputs, kernel_posterior.result_loc), scale=tf.matmul( @@ -377,14 +378,14 @@ def testDenseLocalReparameterization(self): def testDenseFlipout(self): batch_size, in_size, out_size = 2, 3, 4 with self.cached_session() as sess: - tf.compat.v1.set_random_seed(9069) + tf1.set_random_seed(9069) (kernel_posterior, kernel_prior, kernel_divergence, bias_posterior, bias_prior, bias_divergence, layer, inputs, outputs, kl_penalty) = self._testDenseSetUp( tfp.layers.DenseFlipout, batch_size, in_size, out_size, seed=44) - tf.compat.v1.set_random_seed(9069) + tf1.set_random_seed(9069) expected_kernel_posterior_affine = tfd.Normal( loc=tf.zeros_like(kernel_posterior.result_loc), scale=kernel_posterior.result_scale) diff --git a/tensorflow_probability/python/layers/dense_variational_v2.py b/tensorflow_probability/python/layers/dense_variational_v2.py index caf4b1c4c1..0af81b2a7e 100644 --- a/tensorflow_probability/python/layers/dense_variational_v2.py +++ b/tensorflow_probability/python/layers/dense_variational_v2.py @@ -18,7 +18,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions import kullback_leibler @@ -164,7 +165,7 @@ def kl_divergence_fn(distribution_a, distribution_b): # Closure over: kl_divergence_fn, weight. def _fn(distribution_a, distribution_b): """Closure that computes KLDiv as a function of `a` as in `KL[a, b]`.""" - with tf.compat.v1.name_scope('kldivergence_loss'): + with tf1.name_scope('kldivergence_loss'): kl = kl_divergence_fn(distribution_a, distribution_b) if weight is not None: kl = tf.cast(weight, dtype=kl.dtype) * kl diff --git a/tensorflow_probability/python/layers/dense_variational_v2_test.py b/tensorflow_probability/python/layers/dense_variational_v2_test.py index 1401811470..96dc64a634 100644 --- a/tensorflow_probability/python/layers/dense_variational_v2_test.py +++ b/tensorflow_probability/python/layers/dense_variational_v2_test.py @@ -19,15 +19,15 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - def create_dataset(): np.random.seed(43) @@ -76,7 +76,7 @@ def prior_trainable(kernel_size, bias_size=0, dtype=None): @test_util.run_all_in_graph_and_eager_modes -class DenseVariationalLayerTest(tf.test.TestCase): +class DenseVariationalLayerTest(test_case.TestCase): def test_end_to_end(self): # Get dataset. @@ -89,7 +89,7 @@ def test_end_to_end(self): ]) # Do inference. 
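# The `kldivergence_loss` closure in the hunk above reduces to: compute
# KL[q, p], then scale it, typically by 1/num_examples so the penalty is
# commensurate with a per-example log-likelihood. A minimal sketch (the
# distributions and the weight are illustrative):
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

q = tfd.Normal(loc=0., scale=1.)
p = tfd.Normal(loc=1., scale=2.)
kl = tfd.kl_divergence(q, p)                     # analytic KL, a scalar here
weighted_kl = tf.cast(1. / 150., kl.dtype) * kl  # weight = 1 / num_examples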
- model.compile(optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.05), + model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=negloglik) model.fit(x, y, epochs=2, verbose=False) diff --git a/tensorflow_probability/python/layers/distribution_layer.py b/tensorflow_probability/python/layers/distribution_layer.py index 2fb70bd5de..aad8b6a2d0 100644 --- a/tensorflow_probability/python/layers/distribution_layer.py +++ b/tensorflow_probability/python/layers/distribution_layer.py @@ -28,7 +28,8 @@ from cloudpickle import CloudPickler import numpy as np import six -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf # By importing `distributions` as `tfd`, docstrings will show # `tfd.Distribution`. We import `bijectors` the same way, for consistency. @@ -75,7 +76,7 @@ def _event_size(event_shape, name=None): when the number of elements can be computed immediately. Otherwise, returns a scalar tensor. """ - with tf.compat.v1.name_scope(name, 'event_size', [event_shape]): + with tf1.name_scope(name, 'event_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, dtype=tf.int32, name='event_shape') @@ -347,7 +348,7 @@ def __init__(self, @staticmethod def new(params, event_size, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL', + with tf1.name_scope(name, 'MultivariateNormalTriL', [params, event_size]): params = tf.convert_to_tensor(value=params, name='params') scale_tril = tfb.ScaleTriL( @@ -361,7 +362,7 @@ def new(params, event_size, validate_args=False, name=None): @staticmethod def params_size(event_size, name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size', + with tf1.name_scope(name, 'MultivariateNormalTriL_params_size', [event_size]): return event_size + event_size * (event_size + 1) // 2 @@ -454,7 +455,7 @@ def __init__(self, @staticmethod def new(params, event_size, dtype=None, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'OneHotCategorical', + with tf1.name_scope(name, 'OneHotCategorical', [params, event_size]): return tfd.OneHotCategorical( logits=params, @@ -562,7 +563,7 @@ def __init__(self, def new(params, event_size, num_components, dtype=None, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical', + with tf1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical', [params, event_size, num_components]): params = tf.convert_to_tensor(value=params, name='params') dist = MixtureSameFamily.new( @@ -586,7 +587,7 @@ def new(params, event_size, num_components, @staticmethod def params_size(event_size, num_components, name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'CategoricalMixtureOfOneHotCategorical_params_size', [event_size, num_components]): return MixtureSameFamily.params_size( @@ -696,7 +697,7 @@ def __init__(self, @staticmethod def new(params, event_shape=(), dtype=None, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'IndependentBernoulli', + with tf1.name_scope(name, 'IndependentBernoulli', [params, event_shape]): 
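# Worked check of `MultivariateNormalTriL.params_size` above: `event_size`
# entries for the loc plus event_size * (event_size + 1) // 2 entries for a
# lower-triangular scale factor.
def mvn_tril_params_size(event_size):
  return event_size + event_size * (event_size + 1) // 2

assert mvn_tril_params_size(3) == 9    # 3 loc + 6 triangular entries
assert mvn_tril_params_size(5) == 20   # 5 loc + 15 triangular entries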
params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( @@ -723,7 +724,7 @@ def new(params, event_shape=(), dtype=None, validate_args=False, name=None): @staticmethod def params_size(event_shape=(), name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope(name, 'IndependentBernoulli_params_size', + with tf1.name_scope(name, 'IndependentBernoulli_params_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32) @@ -754,7 +755,7 @@ def get_config(self): def _eval_all_one_hot(fn, dist, name=None): """OneHotCategorical helper computing probs, cdf, etc over its support.""" - with tf.compat.v1.name_scope(name, 'eval_all_one_hot'): + with tf1.name_scope(name, 'eval_all_one_hot'): event_size = dist.event_shape_tensor()[-1] batch_ndims = tf.size(input=dist.batch_shape_tensor()) # Reshape `eye(d)` to: `[d] + [1]*batch_ndims + [d]`. @@ -834,7 +835,7 @@ def __init__(self, @staticmethod def new(params, event_shape=(), validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'IndependentLogistic', + with tf1.name_scope(name, 'IndependentLogistic', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( @@ -858,7 +859,7 @@ def new(params, event_shape=(), validate_args=False, name=None): @staticmethod def params_size(event_shape=(), name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope(name, 'IndependentLogistic_params_size', + with tf1.name_scope(name, 'IndependentLogistic_params_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32) @@ -951,7 +952,7 @@ def __init__(self, @staticmethod def new(params, event_shape=(), validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'IndependentNormal', + with tf1.name_scope(name, 'IndependentNormal', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( @@ -975,7 +976,7 @@ def new(params, event_shape=(), validate_args=False, name=None): @staticmethod def params_size(event_shape=(), name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope(name, 'IndependentNormal_params_size', + with tf1.name_scope(name, 'IndependentNormal_params_size', [event_shape]): event_shape = tf.convert_to_tensor( value=event_shape, name='event_shape', dtype_hint=tf.int32) @@ -1084,7 +1085,7 @@ def __init__(self, @staticmethod def new(params, event_shape=(), validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'IndependentPoisson', + with tf1.name_scope(name, 'IndependentPoisson', [params, event_shape]): params = tf.convert_to_tensor(value=params, name='params') event_shape = dist_util.expand_to_vector( @@ -1106,7 +1107,7 @@ def new(params, event_shape=(), validate_args=False, name=None): @staticmethod def params_size(event_shape=(), name=None): """The number of `params` needed to create a single distribution.""" - with tf.compat.v1.name_scope(name, 'IndependentPoisson_params_size', + with tf1.name_scope(name, 'IndependentPoisson_params_size', [event_shape]): event_shape = tf.convert_to_tensor( 
value=event_shape, name='event_shape', dtype_hint=tf.int32) @@ -1328,7 +1329,7 @@ def kl_divergence_fn(distribution_a, distribution_b): # Closure over: distribution_b, kl_divergence_fn, weight. def _fn(distribution_a): """Closure that computes KLDiv as a function of `a` as in `KL[a, b]`.""" - with tf.compat.v1.name_scope('kldivergence_loss'): + with tf1.name_scope('kldivergence_loss'): # TODO(b/119756336): Due to eager/graph Jacobian graph caching bug # we add here the capability for deferred construction of the prior. # This capability can probably be removed once b/119756336 is resolved. @@ -1428,7 +1429,7 @@ def __init__(self, def new(params, num_components, component_layer, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" - with tf.compat.v1.name_scope(name, 'MixtureSameFamily', + with tf1.name_scope(name, 'MixtureSameFamily', [params, num_components, component_layer]): params = tf.convert_to_tensor(value=params, name='params') num_components = tf.convert_to_tensor( @@ -1464,7 +1465,7 @@ def params_size(num_components, component_params_size, name=None): params_size: The number of parameters needed to create the mixture distribution. """ - with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size', + with tf1.name_scope(name, 'MixtureSameFamily_params_size', [num_components, component_params_size]): num_components = tf.convert_to_tensor( value=num_components, name='num_components', dtype_hint=tf.int32) @@ -1750,7 +1751,7 @@ def __init__( event_shape=(1,), inducing_index_points_initializer=None, unconstrained_observation_noise_variance_initializer=( - tf.compat.v1.initializers.constant(-10.)), + tf1.initializers.constant(-10.)), variational_inducing_observations_scale_initializer=None, mean_fn=None, jitter=1e-6, @@ -1836,7 +1837,7 @@ def build(self, input_shape): if self._mean_fn is None: self.mean = self.add_variable( - initializer=tf.compat.v1.initializers.constant([0.]), + initializer=tf1.initializers.constant([0.]), dtype=self._dtype, name='mean') self._mean_fn = lambda x: self.mean @@ -1863,14 +1864,14 @@ def build(self, input_shape): self._variational_inducing_observations_loc = self.add_variable( name='variational_inducing_observations_loc', shape=self._event_shape.as_list() + [self._num_inducing_points], - initializer=tf.compat.v1.initializers.zeros(), + initializer=tf1.initializers.zeros(), dtype=self._dtype) if self._variational_inducing_observations_scale_initializer is None: eyes = (np.ones(self._event_shape.as_list() + [1, 1]) * np.eye(self._num_inducing_points, dtype=self._dtype)) self._variational_inducing_observations_scale_initializer = ( - tf.compat.v1.initializers.constant(1e-5 * eyes)) + tf1.initializers.constant(1e-5 * eyes)) self._variational_inducing_observations_scale = self.add_variable( name='variational_inducing_observations_scale', shape=(self._event_shape.as_list() + diff --git a/tensorflow_probability/python/layers/distribution_layer_test.py b/tensorflow_probability/python/layers/distribution_layer_test.py index 02b8943b4e..8ed9794f46 100644 --- a/tensorflow_probability/python/layers/distribution_layer_test.py +++ b/tensorflow_probability/python/layers/distribution_layer_test.py @@ -19,18 +19,21 @@ import functools # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python 
import distributions as tfd +from tensorflow_probability.python import layers as tfpl +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import tfk = tf.keras + tfkl = tf.keras.layers -tfb = tfp.bijectors -tfd = tfp.distributions -tfpl = tfp.layers def _logit_avg_expit(t): @@ -49,7 +52,7 @@ def _vec_pad(x, value=0): @test_util.run_all_in_graph_and_eager_modes -class EndToEndTest(tf.test.TestCase): +class EndToEndTest(test_case.TestCase): """Test tfp.layers work in all three Keras APIs. For end-to-end tests we fit a Variational Autoencoder (VAE) because this @@ -128,7 +131,7 @@ def accuracy(x, rv_x): axis=tf.range(-rv_x.event_shape.ndims, 0)) vae_model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.5), + optimizer=tf.optimizers.Adam(learning_rate=0.5), loss=lambda x, rv_x: -rv_x.log_prob(x), metrics=[accuracy]) vae_model.fit(self.x, self.x, @@ -168,7 +171,7 @@ def test_keras_functional_api(self): vae_model = tfk.Model(inputs=images, outputs=decoded) vae_model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda x, rv_x: -rv_x.log_prob(x), metrics=[]) vae_model.fit(self.x, self.x, @@ -226,7 +229,7 @@ def call(self, inputs): vae_model = tfk.Model(inputs=images, outputs=decoded) vae_model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda x, rv_x: -rv_x.log_prob(x), metrics=[]) vae_model.fit(self.x, self.x, @@ -267,7 +270,7 @@ def test_keras_sequential_api_multiple_draws(self): inputs=encoder_model.inputs, outputs=decoder_model(encoder_model.outputs[0])) vae_model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda x, rv_x: -rv_x.log_prob(x), metrics=[]) vae_model.fit(self.x, self.x, @@ -281,7 +284,7 @@ def test_keras_sequential_api_multiple_draws(self): @test_util.run_all_in_graph_and_eager_modes -class DistributionLambdaSerializationTest(tf.test.TestCase): +class DistributionLambdaSerializationTest(test_case.TestCase): def assertSerializable(self, model, batch_size=1): """Assert that a model can be saved/loaded via Keras Model.save/load_model. @@ -399,7 +402,7 @@ def make_distribution(t): @test_util.run_all_in_graph_and_eager_modes -class DistributionLambdaVariableCreation(tf.test.TestCase): +class DistributionLambdaVariableCreation(test_case.TestCase): def test_variable_creation(self): conv1 = tfkl.Convolution2D(filters=1, kernel_size=[1, 3]) @@ -419,7 +422,7 @@ def test_variable_creation(self): model = tfk.Model(x, normal(x)) # pylint: disable=unused-variable model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda x, rv_x: -rv_x.log_prob(x), metrics=[]) @@ -433,7 +436,7 @@ def test_variable_creation(self): @test_util.run_all_in_graph_and_eager_modes -class KLDivergenceAddLoss(tf.test.TestCase): +class KLDivergenceAddLoss(test_case.TestCase): def test_approx_kl(self): # TODO(b/120320323): Enable this test in eager. 
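# For reference, the kind of Monte Carlo check `test_approx_kl` performs:
# estimate KL[q, p] as an average of log-density differences under samples
# from q, then compare against the analytic value. Sketch only; the
# distributions and sample count below are not the test's actual
# configuration:
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

q = tfd.Normal(loc=1., scale=1.)
p = tfd.Normal(loc=0., scale=2.)
z = q.sample(10000, seed=42)
approx_kl = tf.reduce_mean(q.log_prob(z) - p.log_prob(z))
exact_kl = tfd.kl_divergence(q, p)    # agrees with approx_kl up to MC error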
@@ -469,7 +472,7 @@ def test_approx_kl(self): self.assertNear(actual_kl_, approx_kl_, err=0.15) model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda x, dist: -dist.log_prob(x[0, :event_size]), metrics=[]) model.fit(x, x, @@ -479,7 +482,7 @@ def test_approx_kl(self): @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalTriLTest(tf.test.TestCase): +class MultivariateNormalTriLTest(test_case.TestCase): def _check_distribution(self, t, x): self.assertIsInstance(x, tfd.MultivariateNormalTriL) @@ -530,7 +533,7 @@ def test_doc_string(self): # Fit. model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda y, model: -model.log_prob(y), metrics=[]) batch_size = 100 @@ -545,7 +548,7 @@ def test_doc_string(self): @test_util.run_all_in_graph_and_eager_modes -class OneHotCategoricalTest(tf.test.TestCase): +class OneHotCategoricalTest(test_case.TestCase): def _check_distribution(self, t, x): self.assertIsInstance(x, tfd.OneHotCategorical) @@ -590,7 +593,7 @@ def test_doc_string(self): # Fit. model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.5), + optimizer=tf.optimizers.Adam(learning_rate=0.5), loss=lambda y, model: -model.log_prob(y), metrics=[]) batch_size = 100 @@ -602,7 +605,7 @@ def test_doc_string(self): @test_util.run_all_in_graph_and_eager_modes -class CategoricalMixtureOfOneHotCategoricalTest(tf.test.TestCase): +class CategoricalMixtureOfOneHotCategoricalTest(test_case.TestCase): def _check_distribution(self, t, x): self.assertIsInstance(x, tfd.MixtureSameFamily) @@ -670,7 +673,7 @@ def test_doc_string(self): # Fit. model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.5), + optimizer=tf.optimizers.Adam(learning_rate=0.5), loss=lambda y, model: -model.log_prob(y), metrics=[]) batch_size = 100 @@ -712,7 +715,7 @@ def _build_tensor(self, ndarray, dtype=None): # Enforce parameterized dtype and static/dynamic testing. ndarray = np.asarray(ndarray).astype( dtype if dtype is not None else self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) def _check_distribution(self, t, x, batch_shape): @@ -815,14 +818,14 @@ def _distribution_to_params(self, distribution, batch_shape): @test_util.run_all_in_graph_and_eager_modes -class IndependentBernoulliTestDynamicShape(tf.test.TestCase, +class IndependentBernoulliTestDynamicShape(test_case.TestCase, _IndependentBernoulliTest): dtype = np.float64 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class IndependentBernoulliTestStaticShape(tf.test.TestCase, +class IndependentBernoulliTestStaticShape(test_case.TestCase, _IndependentBernoulliTest): dtype = np.float32 use_static_shape = True @@ -849,7 +852,7 @@ def test_doc_string(self): # Fit. 
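# The compile pattern that recurs in these doc-string tests: the model's
# output is a distribution, and the loss is its negative log-likelihood.
# Skeleton using the module's tfk/tfkl/tfpl aliases (the event size 3 is a
# placeholder):
negloglik = lambda y, rv_y: -rv_y.log_prob(y)
model = tfk.Sequential([
    tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(3)),
    tfpl.MultivariateNormalTriL(3),
])
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01),
              loss=negloglik)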
model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.5), + optimizer=tf.optimizers.Adam(learning_rate=0.5), loss=lambda y, model: -model.log_prob(y)) batch_size = 10000 model.fit(x, y, @@ -876,14 +879,14 @@ def _distribution_to_params(self, distribution, batch_shape): @test_util.run_all_in_graph_and_eager_modes -class IndependentLogisticTestDynamicShape(tf.test.TestCase, +class IndependentLogisticTestDynamicShape(test_case.TestCase, _IndependentLogisticTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class IndependentLogisticTestStaticShape(tf.test.TestCase, +class IndependentLogisticTestStaticShape(test_case.TestCase, _IndependentLogisticTest): dtype = np.float64 use_static_shape = True @@ -961,14 +964,14 @@ def reshape(x): @test_util.run_all_in_graph_and_eager_modes -class IndependentNormalTestDynamicShape(tf.test.TestCase, +class IndependentNormalTestDynamicShape(test_case.TestCase, _IndependentNormalTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class IndependentNormalTestStaticShape(tf.test.TestCase, +class IndependentNormalTestStaticShape(test_case.TestCase, _IndependentNormalTest): dtype = np.float64 use_static_shape = True @@ -1008,14 +1011,14 @@ def _distribution_to_params(self, distribution, batch_shape): @test_util.run_all_in_graph_and_eager_modes -class IndependentPoissonTestDynamicShape(tf.test.TestCase, +class IndependentPoissonTestDynamicShape(test_case.TestCase, _IndependentPoissonTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class IndependentPoissonTestStaticShape(tf.test.TestCase, +class IndependentPoissonTestStaticShape(test_case.TestCase, _IndependentPoissonTest): dtype = np.float64 use_static_shape = True @@ -1037,7 +1040,7 @@ def test_doc_string(self): # Fit. model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.05), + optimizer=tf.optimizers.Adam(learning_rate=0.05), loss=lambda y, model: -model.log_prob(y), metrics=[]) batch_size = 50 @@ -1070,7 +1073,7 @@ def _build_tensor(self, ndarray, dtype=None): # Enforce parameterized dtype and static/dynamic testing. ndarray = np.asarray(ndarray).astype( dtype if dtype is not None else self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) def _check_distribution(self, t, x, batch_shape): @@ -1218,7 +1221,7 @@ def test_doc_string(self): # Fit. batch_size = 100 model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.02), + optimizer=tf.optimizers.Adam(learning_rate=0.02), loss=lambda y, model: -model.log_prob(y)) model.fit(x, y, batch_size=batch_size, @@ -1229,14 +1232,14 @@ def test_doc_string(self): @test_util.run_all_in_graph_and_eager_modes -class MixtureLogisticTestDynamicShape(tf.test.TestCase, +class MixtureLogisticTestDynamicShape(test_case.TestCase, _MixtureLogisticTest): dtype = np.float64 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class MixtureLogisticTestStaticShape(tf.test.TestCase, +class MixtureLogisticTestStaticShape(test_case.TestCase, _MixtureLogisticTest): dtype = np.float32 use_static_shape = True @@ -1288,7 +1291,7 @@ def test_doc_string(self): # Fit. 
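# The Static/Dynamic test-class pairs above differ only in how the
# `_build_tensor` helpers wrap an array: with `use_static_shape=False`, the
# array is routed through a shape-erased placeholder, so every shape
# computation must succeed at graph-run time rather than trace time. The
# pattern, restated outside a test class:
import numpy as np
import tensorflow.compat.v1 as tf1

def build_tensor(ndarray, use_static_shape, dtype=np.float32):
  ndarray = np.asarray(ndarray).astype(dtype)
  return tf1.placeholder_with_default(
      input=ndarray, shape=ndarray.shape if use_static_shape else None)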
batch_size = 100 model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.02), + optimizer=tf.optimizers.Adam(learning_rate=0.02), loss=lambda y, model: -model.log_prob(y)) model.fit(x, y, batch_size=batch_size, @@ -1299,14 +1302,14 @@ def test_doc_string(self): @test_util.run_all_in_graph_and_eager_modes -class MixtureNormalTestDynamicShape(tf.test.TestCase, +class MixtureNormalTestDynamicShape(test_case.TestCase, _MixtureNormalTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class MixtureNormalTestStaticShape(tf.test.TestCase, +class MixtureNormalTestStaticShape(test_case.TestCase, _MixtureNormalTest): dtype = np.float64 use_static_shape = True @@ -1319,7 +1322,7 @@ def _build_tensor(self, ndarray, dtype=None): # Enforce parameterized dtype and static/dynamic testing. ndarray = np.asarray(ndarray).astype( dtype if dtype is not None else self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) def _check_distribution(self, t, x, batch_shape): @@ -1404,7 +1407,7 @@ def test_doc_string(self): # Fit. batch_size = 100 model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.02), + optimizer=tf.optimizers.Adam(learning_rate=0.02), loss=lambda y, model: -model.log_prob(y)) model.fit(x, y, batch_size=batch_size, @@ -1415,21 +1418,21 @@ def test_doc_string(self): @test_util.run_all_in_graph_and_eager_modes -class MixtureSameFamilyTestDynamicShape(tf.test.TestCase, +class MixtureSameFamilyTestDynamicShape(test_case.TestCase, _MixtureSameFamilyTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class MixtureSameFamilyTestStaticShape(tf.test.TestCase, +class MixtureSameFamilyTestStaticShape(test_case.TestCase, _MixtureSameFamilyTest): dtype = np.float64 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class VariationalGaussianProcessEndToEnd(tf.test.TestCase): +class VariationalGaussianProcessEndToEnd(test_case.TestCase): def testEndToEnd(self): np.random.seed(43) @@ -1455,7 +1458,7 @@ def __init__(self, **kwargs): super(KernelFn, self).__init__(**kwargs) self._amplitude = self.add_variable( - initializer=tf.compat.v1.initializers.constant(.54), + initializer=tf1.initializers.constant(.54), dtype=dtype, name='amplitude') @@ -1472,7 +1475,7 @@ def kernel(self): # Add a leading dimension for the event_shape. 
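# Two numbers worth pinning down in this VGP test: the inducing-scale
# initializer below is 1e-3 * identity (a nearly deterministic initial
# variational posterior), and `variational_loss` is later called with
# kl_weight = batch_size / n, so over one pass through the n training points
# the KL term is counted exactly once. Sketch of the bookkeeping
# (illustrative sizes):
n, batch_size = 150, 64
kl_weight = float(batch_size) / n
steps_per_epoch = float(n) / batch_size
assert abs(kl_weight * steps_per_epoch - 1.) < 1e-12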
eyes = np.expand_dims(np.eye(num_inducing_points), 0) variational_inducing_observations_scale_initializer = ( - tf.compat.v1.initializers.constant(1e-3 * eyes)) + tf1.initializers.constant(1e-3 * eyes)) model = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=[1], dtype=dtype), @@ -1482,7 +1485,7 @@ def kernel(self): num_inducing_points=num_inducing_points, kernel_provider=KernelFn(dtype=dtype), inducing_index_points_initializer=( - tf.compat.v1.initializers.constant( + tf1.initializers.constant( np.linspace(*x_range, num=num_inducing_points, dtype=dtype)[..., np.newaxis])), @@ -1491,13 +1494,13 @@ def kernel(self): ]) if not tf.executing_eagerly(): - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) batch_size = 64 kl_weight = np.float64(batch_size) / n loss = lambda y, d: d.variational_loss(y, kl_weight=kl_weight) model.compile( - optimizer=tf.compat.v2.optimizers.Adam(learning_rate=0.02), + optimizer=tf.optimizers.Adam(learning_rate=0.02), loss=loss) # This should have no issues @@ -1511,7 +1514,7 @@ def kernel(self): @test_util.run_all_in_graph_and_eager_modes -class JointDistributionLayer(tf.test.TestCase): +class JointDistributionLayer(test_case.TestCase): def test_works(self): x = tf.keras.Input(shape=()) diff --git a/tensorflow_probability/python/layers/initializers.py b/tensorflow_probability/python/layers/initializers.py index ad146954c8..344c0fbd25 100644 --- a/tensorflow_probability/python/layers/initializers.py +++ b/tensorflow_probability/python/layers/initializers.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf class BlockwiseInitializer(tf.keras.initializers.Initializer): @@ -84,7 +85,7 @@ def __call__(self, shape, dtype=None): if sum(sizes_) != n: raise ValueError(message) elif self.validate_args: - assertions.append(tf.compat.v1.assert_equal( + assertions.append(tf1.assert_equal( shape[-1], tf.reduce_sum(input_tensor=self.sizes), message=message)) s = (shape[:-1] @@ -107,7 +108,7 @@ def get_config(self): """Returns initializer configuration as a JSON-serializable dict.""" return { 'initializers': [ - tf.compat.v2.initializers.serialize( + tf.initializers.serialize( tf.keras.initializers.get(init)) for init in self.initializers ], @@ -119,7 +120,7 @@ def get_config(self): def from_config(cls, config): """Instantiates an initializer from a configuration dictionary.""" return cls(**{ - 'initializers': [tf.compat.v2.initializers.deserialize(init) + 'initializers': [tf.initializers.deserialize(init) for init in config.get('initializers', [])], 'sizes': config.get('sizes', []), 'validate_args': config.get('validate_args', False), diff --git a/tensorflow_probability/python/layers/initializers_test.py b/tensorflow_probability/python/layers/initializers_test.py index a0e4830f14..242a7d6c7b 100644 --- a/tensorflow_probability/python/layers/initializers_test.py +++ b/tensorflow_probability/python/layers/initializers_test.py @@ -21,14 +21,14 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class BlockwiseInitializerTest(tf.test.TestCase): +class BlockwiseInitializerTest(test_case.TestCase): def 
test_works_correctly(self): init = tfp.layers.BlockwiseInitializer(['glorot_uniform', 'zeros'], [3, 4]) @@ -38,9 +38,9 @@ def test_works_correctly(self): self.assertAllEqual(np.zeros([2, 1, 4]), x_[..., 3:]) def test_de_serialization(self): - s = tf.compat.v2.initializers.serialize( + s = tf.initializers.serialize( tfp.layers.BlockwiseInitializer(['glorot_uniform', 'zeros'], [3, 4])) - init_clone = tf.compat.v2.initializers.deserialize(s) + init_clone = tf.initializers.deserialize(s) x = init_clone([2, 1, 7]) self.assertEqual((2, 1, 7), x.shape) x_ = self.evaluate(x) diff --git a/tensorflow_probability/python/layers/internal/BUILD b/tensorflow_probability/python/layers/internal/BUILD index 7fe5a5303e..a9deba4bcf 100644 --- a/tensorflow_probability/python/layers/internal/BUILD +++ b/tensorflow_probability/python/layers/internal/BUILD @@ -58,6 +58,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/layers/internal/distribution_tensor_coercible_test.py b/tensorflow_probability/python/layers/internal/distribution_tensor_coercible_test.py index 48cfe10376..e2bc58a361 100644 --- a/tensorflow_probability/python/layers/internal/distribution_tensor_coercible_test.py +++ b/tensorflow_probability/python/layers/internal/distribution_tensor_coercible_test.py @@ -20,17 +20,18 @@ import operator # Dependency imports + from absl.testing import parameterized import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.layers.internal import distribution_tensor_coercible -tfb = tfp.bijectors -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + dtc = distribution_tensor_coercible @@ -64,7 +65,7 @@ def __add__(self, x): @test_util.run_all_in_graph_and_eager_modes class DistributionTensorConversionTest( - tf.test.TestCase, parameterized.TestCase): + test_case.TestCase, parameterized.TestCase): def testErrorsByDefault(self): x = tfd.Normal(loc=0., scale=1.) 
@@ -273,7 +274,7 @@ def testWhileLoopWithControlFlowV2(self): @test_util.run_all_in_graph_and_eager_modes -class MemoryLeakTest(tf.test.TestCase): +class MemoryLeakTest(test_case.TestCase): def testTypeObjectLeakage(self): if not tf.executing_eagerly(): diff --git a/tensorflow_probability/python/layers/internal/tensor_tuple.py b/tensorflow_probability/python/layers/internal/tensor_tuple.py index 3a19763d92..8a2051140c 100644 --- a/tensorflow_probability/python/layers/internal/tensor_tuple.py +++ b/tensorflow_probability/python/layers/internal/tensor_tuple.py @@ -18,7 +18,7 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow.python.framework import composite_tensor # pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import type_spec # pylint: disable=g-direct-tensorflow-import diff --git a/tensorflow_probability/python/layers/internal/tensor_tuple_test.py b/tensorflow_probability/python/layers/internal/tensor_tuple_test.py index 832a78843b..062a173e8b 100644 --- a/tensorflow_probability/python/layers/internal/tensor_tuple_test.py +++ b/tensorflow_probability/python/layers/internal/tensor_tuple_test.py @@ -20,7 +20,7 @@ import re -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.layers.internal import tensor_tuple diff --git a/tensorflow_probability/python/layers/masked_autoregressive.py b/tensorflow_probability/python/layers/masked_autoregressive.py index 5f658756ea..69795aae92 100644 --- a/tensorflow_probability/python/layers/masked_autoregressive.py +++ b/tensorflow_probability/python/layers/masked_autoregressive.py @@ -19,7 +19,7 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v2 as tf # By importing `distributions` as `tfd`, docstrings will show # `tfd.Distribution`. We import `bijectors` the same way, for consistency. diff --git a/tensorflow_probability/python/layers/masked_autoregressive_test.py b/tensorflow_probability/python/layers/masked_autoregressive_test.py index a7e74dce90..8313b14ab8 100644 --- a/tensorflow_probability/python/layers/masked_autoregressive_test.py +++ b/tensorflow_probability/python/layers/masked_autoregressive_test.py @@ -17,21 +17,22 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python import layers as tfpl +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import tfk = tf.keras tfkl = tf.keras.layers -tfb = tfp.bijectors -tfd = tfp.distributions -tfpl = tfp.layers @test_util.run_all_in_graph_and_eager_modes -class AutoregressiveTransformTest(tf.test.TestCase): +class AutoregressiveTransformTest(test_case.TestCase): def test_doc_string(self): # Generate data -- as in Figure 1 in [Papamakarios et al. (2017)][1]). 
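# The layer exercised here wraps a masked autoregressive flow. Outside Keras,
# the same density model reads roughly as follows, using the module's
# tfb/tfd aliases (hidden sizes are illustrative; `event_shape=[2]` matches
# the two-dimensional data):
made = tfb.AutoregressiveNetwork(params=2, hidden_units=[10, 10])
maf = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made),
    event_shape=[2])
# maf.log_prob(x) is differentiable in the MADE weights, hence trainable.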
@@ -64,7 +65,7 @@ def test_doc_string(self): ]) model.compile( - optimizer=tf.compat.v2.optimizers.Adam(), + optimizer=tf.optimizers.Adam(), loss=lambda y, rv_y: -rv_y.log_prob(y)) model.fit(x=np.zeros((n, 0)), diff --git a/tensorflow_probability/python/layers/util.py b/tensorflow_probability/python/layers/util.py index 39b39e8768..9b4c741e98 100644 --- a/tensorflow_probability/python/layers/util.py +++ b/tensorflow_probability/python/layers/util.py @@ -21,7 +21,8 @@ import types # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python import util as util @@ -39,8 +40,8 @@ def default_loc_scale_fn( is_singular=False, - loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1), - untransformed_scale_initializer=tf.compat.v1.initializers.random_normal( + loc_initializer=tf1.initializers.random_normal(stddev=0.1), + untransformed_scale_initializer=tf1.initializers.random_normal( mean=-3., stddev=0.1), loc_regularizer=None, untransformed_scale_regularizer=None, @@ -120,8 +121,8 @@ def _fn(dtype, shape, name, trainable, add_variable_fn): def default_mean_field_normal_fn( is_singular=False, - loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1), - untransformed_scale_initializer=tf.compat.v1.initializers.random_normal( + loc_initializer=tf1.initializers.random_normal(stddev=0.1), + untransformed_scale_initializer=tf1.initializers.random_normal( mean=-3., stddev=0.1), loc_regularizer=None, untransformed_scale_regularizer=None, diff --git a/tensorflow_probability/python/layers/variable_input.py b/tensorflow_probability/python/layers/variable_input.py index cf29147f70..63dd74355a 100644 --- a/tensorflow_probability/python/layers/variable_input.py +++ b/tensorflow_probability/python/layers/variable_input.py @@ -21,7 +21,7 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf class VariableLayer(tf.keras.layers.Layer): diff --git a/tensorflow_probability/python/layers/variable_input_test.py b/tensorflow_probability/python/layers/variable_input_test.py index b2d29a0d5a..3620ba1686 100644 --- a/tensorflow_probability/python/layers/variable_input_test.py +++ b/tensorflow_probability/python/layers/variable_input_test.py @@ -18,16 +18,16 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - @test_util.run_all_in_graph_and_eager_modes -class VariableInputLayerTest(tf.test.TestCase): +class VariableInputLayerTest(test_case.TestCase): def test_sequential_api(self): # Create a trainable distribution using the Sequential API. 
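# Related context from `layers/util.py` above: the posterior scale is stored
# unconstrained and initialized around -3; the effective stddev is (up to a
# tiny epsilon) its softplus image, so weights start with small but nonzero
# noise. Quick numeric check:
import numpy as np
softplus = lambda v: np.log1p(np.exp(v))
softplus(-3.0)                     # ~0.0486: typical initial posterior stddev
softplus(np.array([-3.1, -2.9]))   # spread induced by the stddev=0.1 init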
diff --git a/tensorflow_probability/python/layers/weight_norm_test.py b/tensorflow_probability/python/layers/weight_norm_test.py index 034e69bfa2..d07f1c7b5e 100644 --- a/tensorflow_probability/python/layers/weight_norm_test.py +++ b/tensorflow_probability/python/layers/weight_norm_test.py @@ -25,15 +25,16 @@ import numpy as np import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.layers import weight_norm from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import - tfk = tf.keras + tfkl = tf.keras.layers @test_util.run_all_in_graph_and_eager_modes -class WeightNormTest(tf.test.TestCase, parameterized.TestCase): +class WeightNormTest(test_case.TestCase, parameterized.TestCase): def setUp(self): super(WeightNormTest, self).setUp() diff --git a/tensorflow_probability/python/math/BUILD b/tensorflow_probability/python/math/BUILD index ff8c8baa2a..11cd0e46f9 100644 --- a/tensorflow_probability/python/math/BUILD +++ b/tensorflow_probability/python/math/BUILD @@ -69,6 +69,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -118,6 +119,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -145,6 +147,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -177,6 +180,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:hypothesis_testlib", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -200,6 +204,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -250,6 +255,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:dtype_util", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -273,6 +279,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -296,6 +303,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -319,5 +327,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/math/custom_gradient_test.py b/tensorflow_probability/python/math/custom_gradient_test.py index e48021ab6f..0233e178a5 100644 --- a/tensorflow_probability/python/math/custom_gradient_test.py +++ b/tensorflow_probability/python/math/custom_gradient_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class CustomGradientTest(tf.test.TestCase): +class CustomGradientTest(test_case.TestCase): def test_works_correctly(self): f = lambda x: x**2 / 2 diff --git a/tensorflow_probability/python/math/diag_jacobian_test.py b/tensorflow_probability/python/math/diag_jacobian_test.py index dee2eb7a7b..e1db948447 100644 --- a/tensorflow_probability/python/math/diag_jacobian_test.py +++ 
b/tensorflow_probability/python/math/diag_jacobian_test.py @@ -19,19 +19,18 @@ from __future__ import print_function # Dependency imports -import numpy as np +import numpy as np import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - - @test_util.run_all_in_graph_and_eager_modes -class JacobianTest(tf.test.TestCase): +class JacobianTest(test_case.TestCase): def testJacobianDiagonal3DListInput(self): """Tests that the diagonal of the Jacobian matrix computes correctly.""" diff --git a/tensorflow_probability/python/math/gradient_test.py b/tensorflow_probability/python/math/gradient_test.py index 569650d59f..65f52083e0 100644 --- a/tensorflow_probability/python/math/gradient_test.py +++ b/tensorflow_probability/python/math/gradient_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class GradientTest(tf.test.TestCase): +class GradientTest(test_case.TestCase): def test_non_list(self): f = lambda x: x**2 / 2 diff --git a/tensorflow_probability/python/math/interpolation_test.py b/tensorflow_probability/python/math/interpolation_test.py index 8b7683a0d7..d0f0cdfd83 100644 --- a/tensorflow_probability/python/math/interpolation_test.py +++ b/tensorflow_probability/python/math/interpolation_test.py @@ -23,11 +23,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class InterpRegular1DGridTest(tf.test.TestCase): +class InterpRegular1DGridTest(test_case.TestCase): """Test for tfp.math.interp_regular_1d_grid.""" def setUp(self): @@ -293,7 +295,7 @@ def test_gradients_and_propagation_of_nan_in_x(self): @test_util.run_all_in_graph_and_eager_modes -class BatchInterpRegular1DGridTest(tf.test.TestCase): +class BatchInterpRegular1DGridTest(test_case.TestCase): """Test for 1-D usage of tfp.math.interp_regular_1d_grid.""" def setUp(self): @@ -572,7 +574,7 @@ def test_gradients_and_propagation_of_nan_in_x(self): @test_util.run_all_in_graph_and_eager_modes -class BatchInterpRegularNDGridTest(tf.test.TestCase): +class BatchInterpRegularNDGridTest(test_case.TestCase): def test_2d_scalar_valued_no_leading_dims(self): y_ref = [[0., 1.], [2., 3.]] diff --git a/tensorflow_probability/python/math/linalg_test.py b/tensorflow_probability/python/math/linalg_test.py index adcf3098eb..99601320a8 100644 --- a/tensorflow_probability/python/math/linalg_test.py +++ b/tensorflow_probability/python/math/linalg_test.py @@ -29,11 +29,11 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -class _CholeskyExtend(tf.test.TestCase): +class _CholeskyExtend(test_case.TestCase): def testCholeskyExtension(self): 
xs = np.random.random(7).astype(self.dtype)[:, tf.newaxis] @@ -112,7 +112,7 @@ class CholeskyExtend64Dynamic(_CholeskyExtend): del _CholeskyExtend -class _PivotedCholesky(tf.test.TestCase, parameterized.TestCase): +class _PivotedCholesky(test_case.TestCase, parameterized.TestCase): def _random_batch_psd(self, dim): matrix = np.random.random([2, dim, dim]) @@ -285,12 +285,12 @@ def test_batch(self): @test_util.run_all_in_graph_and_eager_modes -class LUReconstructStatic(tf.test.TestCase, _LUReconstruct): +class LUReconstructStatic(test_case.TestCase, _LUReconstruct): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class LUReconstructDynamic(tf.test.TestCase, _LUReconstruct): +class LUReconstructDynamic(test_case.TestCase, _LUReconstruct): use_static_shape = False @@ -333,12 +333,12 @@ def test_batch(self): @test_util.run_all_in_graph_and_eager_modes -class LUMatrixInverseStatic(tf.test.TestCase, _LUMatrixInverse): +class LUMatrixInverseStatic(test_case.TestCase, _LUMatrixInverse): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class LUMatrixInverseDynamic(tf.test.TestCase, _LUMatrixInverse): +class LUMatrixInverseDynamic(test_case.TestCase, _LUMatrixInverse): use_static_shape = False @@ -398,12 +398,12 @@ def test_batch_broadcast(self): @test_util.run_all_in_graph_and_eager_modes -class LUSolveStatic(tf.test.TestCase, _LUSolve): +class LUSolveStatic(test_case.TestCase, _LUSolve): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class LUSolveDynamic(tf.test.TestCase, _LUSolve): +class LUSolveDynamic(test_case.TestCase, _LUSolve): use_static_shape = False @@ -507,29 +507,30 @@ def test_batch_matvecmul(self): @test_util.run_all_in_graph_and_eager_modes -class SparseOrDenseMatmulStatic(tf.test.TestCase, _SparseOrDenseMatmul): +class SparseOrDenseMatmulStatic(test_case.TestCase, _SparseOrDenseMatmul): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class SparseOrDenseMatmulDynamic(tf.test.TestCase, _SparseOrDenseMatmul): +class SparseOrDenseMatmulDynamic(test_case.TestCase, _SparseOrDenseMatmul): use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class SparseOrDenseMatmulStaticSparse(tf.test.TestCase, _SparseOrDenseMatmul): +class SparseOrDenseMatmulStaticSparse(test_case.TestCase, _SparseOrDenseMatmul): use_static_shape = True use_sparse_tensor = True @test_util.run_all_in_graph_and_eager_modes -class SparseOrDenseMatmulDynamicSparse(tf.test.TestCase, _SparseOrDenseMatmul): +class SparseOrDenseMatmulDynamicSparse(test_case.TestCase, + _SparseOrDenseMatmul): use_static_shape = False use_sparse_tensor = True @test_util.run_all_in_graph_and_eager_modes -class FillTriangularTest(tf.test.TestCase): +class FillTriangularTest(test_case.TestCase): def _fill_triangular(self, x, upper=False): """Numpy implementation of `fill_triangular`.""" diff --git a/tensorflow_probability/python/math/minimize_test.py b/tensorflow_probability/python/math/minimize_test.py index 1d2869b0b3..622301e98f 100644 --- a/tensorflow_probability/python/math/minimize_test.py +++ b/tensorflow_probability/python/math/minimize_test.py @@ -25,11 +25,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class MinimizeTests(tf.test.TestCase): +class MinimizeTests(test_case.TestCase): def 
test_custom_trace_fn(self): diff --git a/tensorflow_probability/python/math/ode/BUILD b/tensorflow_probability/python/math/ode/BUILD index 527571cbfe..bb92add32a 100644 --- a/tensorflow_probability/python/math/ode/BUILD +++ b/tensorflow_probability/python/math/ode/BUILD @@ -70,6 +70,7 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) @@ -92,6 +93,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -113,5 +115,6 @@ py_test( # absl/testing:parameterized dep, # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/math/ode/bdf_util_test.py b/tensorflow_probability/python/math/ode/bdf_util_test.py index 5bf9fd5028..51c4fd26e8 100644 --- a/tensorflow_probability/python/math/ode/bdf_util_test.py +++ b/tensorflow_probability/python/math/ode/bdf_util_test.py @@ -20,10 +20,10 @@ from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.ode import bdf_util - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -33,7 +33,7 @@ ('float64', tf.float64), ('complex128', tf.complex128), ]) -class BDFUtilTest(parameterized.TestCase, tf.test.TestCase): +class BDFUtilTest(parameterized.TestCase, test_case.TestCase): def test_first_step_size_is_large_when_ode_fn_is_constant(self, dtype): initial_state_vec = tf.constant([1.], dtype=dtype) diff --git a/tensorflow_probability/python/math/ode/ode_test.py b/tensorflow_probability/python/math/ode/ode_test.py index e13e35d45e..1744037129 100644 --- a/tensorflow_probability/python/math/ode/ode_test.py +++ b/tensorflow_probability/python/math/ode/ode_test.py @@ -20,18 +20,19 @@ from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python.internal import test_case +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import _RTOL = 1e-8 _ATOL = 1e-12 @test_util.run_all_in_graph_and_eager_modes @parameterized.named_parameters([('bdf', tfp.math.ode.BDF)]) -class NonStiffTest(parameterized.TestCase, tf.test.TestCase): +class NonStiffTest(parameterized.TestCase, test_case.TestCase): def test_zero_dims(self, solver): ode_fn = lambda time, state: -state @@ -182,7 +183,7 @@ def test_riccati(self, solver): @test_util.run_all_in_graph_and_eager_modes @parameterized.named_parameters([('bdf', tfp.math.ode.BDF)]) -class StiffTest(parameterized.TestCase, tf.test.TestCase): +class StiffTest(parameterized.TestCase, test_case.TestCase): def test_van_der_pol(self, solver): @@ -212,7 +213,7 @@ def jacobian_fn(_, state): @parameterized.named_parameters([('bdf', tfp.math.ode.BDF)]) -class GradientTest(parameterized.TestCase, tf.test.TestCase): +class GradientTest(parameterized.TestCase, test_case.TestCase): def test_linear_dense(self, solver): initial_time = 0. 
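The ode_test.py hunks here follow the same recipe as the other test migrations: the solver parameterization is left alone and only the base class switches from `tf.test.TestCase` to `test_case.TestCase`. A stripped-down sketch of that test shape, with an invented class name and a trivial right-hand side standing in for the real solvers:

    from absl.testing import parameterized
    import tensorflow.compat.v2 as tf
    from tensorflow_probability.python.internal import test_case
    from tensorflow.python.framework import test_util  # pylint: disable=g-direct-tensorflow-import

    @test_util.run_all_in_graph_and_eager_modes
    @parameterized.named_parameters([('negate', -1.)])
    class ToyOdeTest(parameterized.TestCase, test_case.TestCase):

      def test_rhs(self, scale):
        ode_fn = lambda time, state: scale * state  # linear RHS, as in ode_test
        self.assertAllClose(self.evaluate(ode_fn(0., tf.constant(2.))), -2.)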
@@ -267,7 +268,7 @@ def test_riccati(self, solver): @test_util.run_all_in_graph_and_eager_modes @parameterized.named_parameters([('bdf', tfp.math.ode.BDF)]) -class GeneralTest(parameterized.TestCase, tf.test.TestCase): +class GeneralTest(parameterized.TestCase, test_case.TestCase): def test_bad_initial_state_dtype(self, solver): ode_fn = lambda time, state: -state diff --git a/tensorflow_probability/python/math/ode/util_test.py b/tensorflow_probability/python/math/ode/util_test.py index b98b2594cd..7855f68bde 100644 --- a/tensorflow_probability/python/math/ode/util_test.py +++ b/tensorflow_probability/python/math/ode/util_test.py @@ -22,8 +22,8 @@ import numpy as np import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.ode import util - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -34,7 +34,7 @@ ('use_automatic_differentiation', True, False), ('use_automatic_differentiation_and_pfor', True, True), ]) -class JacobianTest(tf.test.TestCase): +class JacobianTest(test_case.TestCase): def test_right_mult_by_jacobian_mat(self, use_automatic_differentiation, use_pfor): diff --git a/tensorflow_probability/python/math/psd_kernels/BUILD b/tensorflow_probability/python/math/psd_kernels/BUILD index 6dbe7a8492..7fc7a8f358 100644 --- a/tensorflow_probability/python/math/psd_kernels/BUILD +++ b/tensorflow_probability/python/math/psd_kernels/BUILD @@ -60,6 +60,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/math/psd_kernels/internal:util", ], ) @@ -83,6 +84,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -106,6 +108,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -129,6 +132,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -151,6 +155,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -173,6 +178,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -198,6 +204,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -220,6 +227,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -243,6 +251,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -265,6 +274,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -279,10 +289,22 @@ PSD_KERNEL_PROPERTIES_DEPS = [ "//tensorflow_probability", "//tensorflow_probability/python/internal:assert_util", "//tensorflow_probability/python/internal:hypothesis_testlib", + "//tensorflow_probability/python/math/psd_kernels:hypothesis_testlib", "//tensorflow_probability/python/internal:tensor_util", "//tensorflow_probability/python/internal:tensorshape_util", + "//tensorflow_probability/python/internal:test_case", ] +py_library( + name = "hypothesis_testlib", + 
testonly = True, + srcs = ["hypothesis_testlib.py"], + deps = [ + # hypothesis dep, + # numpy dep, + ], +) + py_test( name = "psd_kernel_properties_test_eager", size = "medium", diff --git a/tensorflow_probability/python/math/psd_kernels/exp_sin_squared_test.py b/tensorflow_probability/python/math/psd_kernels/exp_sin_squared_test.py index 0f74b9edc0..6b82b45fd8 100644 --- a/tensorflow_probability/python/math/psd_kernels/exp_sin_squared_test.py +++ b/tensorflow_probability/python/math/psd_kernels/exp_sin_squared_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class ExpSinSquaredTest(tf.test.TestCase, parameterized.TestCase): +class ExpSinSquaredTest(test_case.TestCase, parameterized.TestCase): def testMismatchedFloatTypesAreBad(self): tfp.math.psd_kernels.ExpSinSquared(1, 1) # Should be OK (float32 fallback). diff --git a/tensorflow_probability/python/math/psd_kernels/exponentiated_quadratic_test.py b/tensorflow_probability/python/math/psd_kernels/exponentiated_quadratic_test.py index 21c2646b5b..19eba843e7 100644 --- a/tensorflow_probability/python/math/psd_kernels/exponentiated_quadratic_test.py +++ b/tensorflow_probability/python/math/psd_kernels/exponentiated_quadratic_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class ExponentiatedQuadraticTest(tf.test.TestCase, parameterized.TestCase): +class ExponentiatedQuadraticTest(test_case.TestCase, parameterized.TestCase): def testMismatchedFloatTypesAreBad(self): tfp.math.psd_kernels.ExponentiatedQuadratic( diff --git a/tensorflow_probability/python/math/psd_kernels/feature_scaled.py b/tensorflow_probability/python/math/psd_kernels/feature_scaled.py index e635f9612f..3f08274484 100644 --- a/tensorflow_probability/python/math/psd_kernels/feature_scaled.py +++ b/tensorflow_probability/python/math/psd_kernels/feature_scaled.py @@ -80,7 +80,10 @@ def rescale_input(x, feature_ndims, example_ndims): return x / scale_diag super(FeatureScaled, self).__init__( - kernel, transformation_fn=rescale_input, validate_args=validate_args) + kernel, + transformation_fn=rescale_input, + validate_args=validate_args, + name=name) @property def scale_diag(self): diff --git a/tensorflow_probability/python/math/psd_kernels/feature_scaled_test.py b/tensorflow_probability/python/math/psd_kernels/feature_scaled_test.py index 97500cea52..ae6b3362eb 100644 --- a/tensorflow_probability/python/math/psd_kernels/feature_scaled_test.py +++ b/tensorflow_probability/python/math/psd_kernels/feature_scaled_test.py @@ -24,6 +24,8 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -110,11 +112,11 @@ def testKernelParametersBroadcast(self, feature_ndims, dims): self.evaluate(ard_kernel.matrix(z, z))) -class FeatureScaledFloat32Test(_FeatureScaledTest, tf.test.TestCase): +class FeatureScaledFloat32Test(_FeatureScaledTest, test_case.TestCase): dtype = np.float32 -class 
FeatureScaledFloat64Test(_FeatureScaledTest, tf.test.TestCase): +class FeatureScaledFloat64Test(_FeatureScaledTest, test_case.TestCase): dtype = np.float64 diff --git a/tensorflow_probability/python/math/psd_kernels/feature_transformed_test.py b/tensorflow_probability/python/math/psd_kernels/feature_transformed_test.py index 8a41369aa8..e28fee26bd 100644 --- a/tensorflow_probability/python/math/psd_kernels/feature_transformed_test.py +++ b/tensorflow_probability/python/math/psd_kernels/feature_transformed_test.py @@ -24,6 +24,7 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.psd_kernels.internal import util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -199,11 +200,13 @@ def vector_transform(x, feature_ndims, param_expansion_ndims): self.evaluate(vector_transformed_kernel.matrix(z, z))) -class FeatureTransformedFloat32Test(_FeatureTransformedTest, tf.test.TestCase): +class FeatureTransformedFloat32Test(_FeatureTransformedTest, + test_case.TestCase): dtype = np.float32 -class FeatureTransformedFloat64Test(_FeatureTransformedTest, tf.test.TestCase): +class FeatureTransformedFloat64Test(_FeatureTransformedTest, + test_case.TestCase): dtype = np.float64 diff --git a/tensorflow_probability/python/math/psd_kernels/hypothesis_testlib.py b/tensorflow_probability/python/math/psd_kernels/hypothesis_testlib.py new file mode 100644 index 0000000000..bdd4df9cf6 --- /dev/null +++ b/tensorflow_probability/python/math/psd_kernels/hypothesis_testlib.py @@ -0,0 +1,73 @@ +# Copyright 2019 The TensorFlow Probability Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Utilities for hypothesis testing of psd_kernels.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from hypothesis.extra import numpy as hpnp +import hypothesis.strategies as hps +import numpy as np + + +@hps.composite +def kernel_input( + draw, + batch_shape, + example_dim=None, + example_ndims=None, + feature_dim=None, + feature_ndims=None): + """Strategy for drawing arbitrary Kernel input. + + Args: + draw: Hypothesis function supplied by `@hps.composite`. + batch_shape: An optional `TensorShape`. The batch shape of the resulting + kernel input. Hypothesis will pick a batch shape if omitted. + example_dim: Optional Python int giving the size of each example dimension. + If omitted, Hypothesis will choose one. + example_ndims: Optional Python int giving the number of example dimensions + of the input. If omitted, Hypothesis will choose one. + feature_dim: Optional Python int giving the size of each feature dimension. + If omitted, Hypothesis will choose one. + feature_ndims: Optional Python int stating the number of feature dimensions + inputs will have. If omitted, Hypothesis will choose one. 
+  Returns:
+    kernel_input: A strategy for drawing kernel inputs with the prescribed
+      shape (or an arbitrary one if omitted).
+  """
+  if example_ndims is None:
+    example_ndims = draw(hps.integers(min_value=1, max_value=4))
+  if example_dim is None:
+    example_dim = draw(hps.integers(min_value=2, max_value=6))
+
+  if feature_ndims is None:
+    feature_ndims = draw(hps.integers(min_value=1, max_value=4))
+  if feature_dim is None:
+    feature_dim = draw(hps.integers(min_value=2, max_value=6))
+
+  input_shape = batch_shape
+  input_shape += [example_dim] * example_ndims
+  input_shape += [feature_dim] * feature_ndims
+  # We would like kernel inputs to be unique. This is to avoid computing
+  # kernel matrices that are singular (merely positive semi-definite).
+  return draw(hpnp.arrays(
+      dtype=np.float32,
+      shape=input_shape,
+      elements=hps.floats(
+          -100, 100, allow_nan=False, allow_infinity=False),
+      unique=True))
diff --git a/tensorflow_probability/python/math/psd_kernels/internal/BUILD b/tensorflow_probability/python/math/psd_kernels/internal/BUILD
index cda9763561..60f6c2bca9 100644
--- a/tensorflow_probability/python/math/psd_kernels/internal/BUILD
+++ b/tensorflow_probability/python/math/psd_kernels/internal/BUILD
@@ -53,6 +53,7 @@ py_test(
         ":util",
         # numpy dep,
         # tensorflow dep,
+        "//tensorflow_probability/python/internal:test_case",
         "//tensorflow_probability/python/math:gradient",
     ],
 )
diff --git a/tensorflow_probability/python/math/psd_kernels/internal/util_test.py b/tensorflow_probability/python/math/psd_kernels/internal/util_test.py
index e4b7cc7e0d..7bcf52f5d8 100644
--- a/tensorflow_probability/python/math/psd_kernels/internal/util_test.py
+++ b/tensorflow_probability/python/math/psd_kernels/internal/util_test.py
@@ -20,13 +20,15 @@
 # Dependency imports
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf1
+import tensorflow.compat.v2 as tf
+from tensorflow_probability.python.internal import test_case
 from tensorflow_probability.python.math.gradient import value_and_gradient
 from tensorflow_probability.python.math.psd_kernels.internal import util


-class UtilTest(tf.test.TestCase):
+class UtilTest(test_case.TestCase):

   def testPadShapeRightWithOnes(self):
     # Test nominal behavior.
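Later hunks in this change route the kernel property tests through the `kernel_input` strategy added above, so a hypothetical end-to-end use may help; the test name and the requested shape below are illustrative only:

    import hypothesis as hp
    from hypothesis import strategies as hps
    from tensorflow_probability.python.math.psd_kernels import hypothesis_testlib as kernel_hps

    @hp.given(hps.data())
    def test_kernel_input_shape(data):
      # batch_shape leads; example and feature dims are appended on the right.
      xs = data.draw(kernel_hps.kernel_input(
          batch_shape=[2], example_ndims=1, feature_dim=3, feature_ndims=1))
      assert xs.shape[0] == 2 and xs.shape[-1] == 3

Because `example_dim` is omitted, Hypothesis draws it, so `xs.shape` comes out as (2, example_dim, 3) with example_dim between 2 and 6.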
@@ -60,7 +62,7 @@ def testPadShapeMiddleWithOnes(self): def testPadShapeRightWithOnesDynamicShape(self): if tf.executing_eagerly(): return # Test partially unknown shape - x = tf.compat.v1.placeholder_with_default(np.ones([3], np.float32), [None]) + x = tf1.placeholder_with_default(np.ones([3], np.float32), [None]) expanded = util.pad_shape_with_ones(x, 3) self.assertAllEqual(expanded.shape.as_list(), [None, 1, 1, 1]) self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1]) @@ -70,7 +72,7 @@ def testPadShapeRightWithOnesDynamicShape(self): self.assertAllEqual(self.evaluate(expanded).shape, [1, 1, 1, 3]) # Test totally unknown shape - x = tf.compat.v1.placeholder_with_default(np.ones([3], np.float32), None) + x = tf1.placeholder_with_default(np.ones([3], np.float32), None) expanded = util.pad_shape_with_ones(x, 3) self.assertIsNone(expanded.shape.ndims) self.assertAllEqual(self.evaluate(expanded).shape, [3, 1, 1, 1]) @@ -100,7 +102,7 @@ def testSumRightmostNdimsPreservingShapeStaticRank(self): util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape, [5, 4]) - x = tf.compat.v1.placeholder_with_default( + x = tf1.placeholder_with_default( np.ones((5, 4, 3, 2)), shape=[5, 4, None, None]) self.assertAllEqual( util.sum_rightmost_ndims_preserving_shape(x, ndims=1).shape.as_list(), @@ -108,7 +110,7 @@ def testSumRightmostNdimsPreservingShapeStaticRank(self): def testSumRightmostNdimsPreservingShapeDynamicRank(self): if tf.executing_eagerly(): return - x = tf.compat.v1.placeholder_with_default(np.ones((5, 4, 3, 2)), shape=None) + x = tf1.placeholder_with_default(np.ones((5, 4, 3, 2)), shape=None) self.assertIsNone( util.sum_rightmost_ndims_preserving_shape(x, ndims=2).shape.ndims) self.assertAllEqual( @@ -161,7 +163,7 @@ def h(x): rtol=1e-10) def testSqrtWithFiniteGradsWithDynamicShape(self): - x = tf.compat.v1.placeholder_with_default([1.], shape=[None]) + x = tf1.placeholder_with_default([1.], shape=[None]) _, grad_tf_sqrt = value_and_gradient(tf.sqrt, x) _, grad_safe_sqrt = value_and_gradient( util.sqrt_with_finite_grads, x) diff --git a/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed.py b/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed.py index 021b97c2a1..2b7d0c747a 100644 --- a/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed.py +++ b/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed.py @@ -18,6 +18,8 @@ from __future__ import division from __future__ import print_function +import functools + import tensorflow.compat.v2 as tf from tensorflow_probability.python.bijectors.kumaraswamy import Kumaraswamy @@ -92,6 +94,20 @@ def concentration1(self): def concentration0(self): return self._concentration0 + def _batch_shape(self): + return functools.reduce( + tf.broadcast_static_shape, + [self.kernel.batch_shape, + self.concentration1.shape[:-self.kernel.feature_ndims], + self.concentration0.shape[:-self.kernel.feature_ndims]]) + + def _batch_shape_tensor(self): + return functools.reduce( + tf.broadcast_dynamic_shape, + [self.kernel.batch_shape_tensor(), + tf.shape(self.concentration1)[:-self.kernel.feature_ndims], + tf.shape(self.concentration0)[:-self.kernel.feature_ndims]]) + def _parameter_control_dependencies(self, is_init): if not self.validate_args: return [] diff --git a/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed_test.py b/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed_test.py index ccc21b172c..62b70489d9 100644 --- 
a/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed_test.py +++ b/tensorflow_probability/python/math/psd_kernels/kumaraswamy_transformed_test.py @@ -24,6 +24,8 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -143,12 +145,12 @@ def testKernelParametersBroadcast(self, feature_ndims, dims): class KumaraswamyTransformedFloat32Test( - _KumaraswamyTransformedTest, tf.test.TestCase): + _KumaraswamyTransformedTest, test_case.TestCase): dtype = np.float32 class KumaraswamyTransformedFloat64Test( - _KumaraswamyTransformedTest, tf.test.TestCase): + _KumaraswamyTransformedTest, test_case.TestCase): dtype = np.float64 diff --git a/tensorflow_probability/python/math/psd_kernels/matern_test.py b/tensorflow_probability/python/math/psd_kernels/matern_test.py index ab308a02ba..27f73c6b12 100644 --- a/tensorflow_probability/python/math/psd_kernels/matern_test.py +++ b/tensorflow_probability/python/math/psd_kernels/matern_test.py @@ -24,10 +24,12 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -class _MaternTestCase(parameterized.TestCase, tf.test.TestCase): +class _MaternTestCase(parameterized.TestCase, test_case.TestCase): """Mixin test for Matern type kernels. Subclasses must specify _kernel_type and _numpy_kernel. diff --git a/tensorflow_probability/python/math/psd_kernels/polynomial.py b/tensorflow_probability/python/math/psd_kernels/polynomial.py index ddc9750ce4..9cbe73a9bf 100644 --- a/tensorflow_probability/python/math/psd_kernels/polynomial.py +++ b/tensorflow_probability/python/math/psd_kernels/polynomial.py @@ -166,6 +166,8 @@ def _apply(self, x1, x2, example_ndims=0): x1 * x2, ndims=self.feature_ndims) else: shift = tf.convert_to_tensor(self.shift) + shift = util.pad_shape_with_ones( + shift, example_ndims + self.feature_ndims) dot_prod = util.sum_rightmost_ndims_preserving_shape( (x1 - shift) * (x2 - shift), ndims=self.feature_ndims) diff --git a/tensorflow_probability/python/math/psd_kernels/polynomial_test.py b/tensorflow_probability/python/math/psd_kernels/polynomial_test.py index 7274709185..b6d64bd87a 100644 --- a/tensorflow_probability/python/math/psd_kernels/polynomial_test.py +++ b/tensorflow_probability/python/math/psd_kernels/polynomial_test.py @@ -25,11 +25,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class PolynomialTest(tf.test.TestCase, parameterized.TestCase): +class PolynomialTest(test_case.TestCase, parameterized.TestCase): """Test the Polynomial kernel.""" def test_mismatched_float_types_are_bad(self): @@ -248,7 +250,7 @@ def testValuesAreCorrect(self): @test_util.run_all_in_graph_and_eager_modes -class LinearTest(tf.test.TestCase, parameterized.TestCase): +class LinearTest(test_case.TestCase, parameterized.TestCase): """Test the Linear kernel.""" def testIsPolynomial(self): diff --git a/tensorflow_probability/python/math/psd_kernels/positive_semidefinite_kernel_test.py 
b/tensorflow_probability/python/math/psd_kernels/positive_semidefinite_kernel_test.py index d2fdd387e9..3bccc3a85f 100644 --- a/tensorflow_probability/python/math/psd_kernels/positive_semidefinite_kernel_test.py +++ b/tensorflow_probability/python/math/psd_kernels/positive_semidefinite_kernel_test.py @@ -28,6 +28,7 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.psd_kernels.internal import util as kernels_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -85,7 +86,8 @@ def _apply(self, x1, x2, example_ndims=0): @test_util.run_all_in_graph_and_eager_modes -class PositiveSemidefiniteKernelTest(tf.test.TestCase, parameterized.TestCase): +class PositiveSemidefiniteKernelTest(test_case.TestCase, + parameterized.TestCase): """Test the abstract base class behaviors.""" def createKernelInputs(self, batched=False): diff --git a/tensorflow_probability/python/math/psd_kernels/psd_kernel_properties_test.py b/tensorflow_probability/python/math/psd_kernels/psd_kernel_properties_test.py index d9db2a6f5f..233bf06ae7 100644 --- a/tensorflow_probability/python/math/psd_kernels/psd_kernel_properties_test.py +++ b/tensorflow_probability/python/math/psd_kernels/psd_kernel_properties_test.py @@ -23,14 +23,16 @@ from hypothesis import strategies as hps from hypothesis.extra import numpy as hpnp import numpy as np - import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import tensor_util +from tensorflow_probability.python.internal import test_case +from tensorflow_probability.python.math import psd_kernels as tfpk +from tensorflow_probability.python.math.psd_kernels import hypothesis_testlib as kernel_hps + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfpk = tfp.positive_semidefinite_kernels flags.DEFINE_enum('tf_mode', 'graph', ['eager', 'graph'], 'TF execution mode to use') @@ -40,23 +42,30 @@ TF2_FRIENDLY_KERNELS = ( 'ExpSinSquared', 'ExponentiatedQuadratic', + 'FeatureScaled', + 'Linear', 'MaternOneHalf', 'MaternThreeHalves', 'MaternFiveHalves', + 'Polynomial', 'RationalQuadratic', 'SchurComplement', ) SPECIAL_KERNELS = [ + 'FeatureScaled', 'SchurComplement' ] INSTANTIABLE_BASE_KERNELS = { 'ExpSinSquared': dict(amplitude=0, length_scale=0, period=0), 'ExponentiatedQuadratic': dict(amplitude=0, length_scale=0), + 'Linear': dict(bias_variance=0, slope_variance=0, shift=0), 'MaternOneHalf': dict(amplitude=0, length_scale=0), 'MaternThreeHalves': dict(amplitude=0, length_scale=0), 'MaternFiveHalves': dict(amplitude=0, length_scale=0), + 'Polynomial': dict( + bias_variance=0, slope_variance=0, shift=0, exponent=0), 'RationalQuadratic': dict( amplitude=0, length_scale=0, scale_mixture_rate=0), } @@ -93,57 +102,83 @@ def _constraint(param): mutex_params=MUTEX_PARAMS)) +def depths(): + # TODO(b/139841600): Increase the depth after we can generate kernel input + # that are not too close to each other. + return hps.integers(min_value=0, max_value=1) + + @hps.composite -def kernel_input( +def feature_scaleds( draw, - batch_shape, - example_dim=None, - example_ndims=None, + batch_shape=None, + event_dim=None, feature_dim=None, - feature_ndims=None): - """Strategy for drawing arbitrary Kernel input. 
+ feature_ndims=None, + enable_vars=None, + depth=None): + """Strategy for drawing `FeatureScaled` kernels. + + The underlying kernel is drawn from the `kernels` strategy. Args: - draw: Hypothesis function supplied by `@hps.composite`. + draw: Hypothesis strategy sampler supplied by `@hps.composite`. batch_shape: An optional `TensorShape`. The batch shape of the resulting - kernel input. Hypothesis will pick a batch shape if omitted. - example_dim: Optional Python int giving the size of each example dimension. - If omitted, Hypothesis will choose one. - example_ndims: Optional Python int giving the number of example dimensions - of the input. If omitted, Hypothesis will choose one. + Kernel. Hypothesis will pick a batch shape if omitted. + event_dim: Optional Python int giving the size of each of the + kernel's parameters' event dimensions. This is shared across all + parameters, permitting square event matrices, compatible location and + scale Tensors, etc. If omitted, Hypothesis will choose one. feature_dim: Optional Python int giving the size of each feature dimension. If omitted, Hypothesis will choose one. feature_ndims: Optional Python int stating the number of feature dimensions inputs will have. If omitted, Hypothesis will choose one. + enable_vars: TODO(bjp): Make this `True` all the time and put variable + initialization in slicing_test. If `False`, the returned parameters are + all Tensors, never Variables or DeferredTensor. + depth: Python `int` giving maximum nesting depth of compound kernel. + Returns: - kernel_input: A strategy for drawing kernel_input with the prescribed shape - (or an arbitrary one if omitted). + kernels: A strategy for drawing `FeatureScaled` kernels with the specified + `batch_shape` (or an arbitrary one if omitted). """ - if example_ndims is None: - example_ndims = draw(hps.integers(min_value=1, max_value=4)) - if example_dim is None: - example_dim = draw(hps.integers(min_value=2, max_value=6)) - - if feature_ndims is None: - feature_ndims = draw(hps.integers(min_value=1, max_value=4)) + if depth is None: + depth = draw(depths()) + if batch_shape is None: + batch_shape = draw(tfp_hps.shapes()) + if event_dim is None: + event_dim = draw(hps.integers(min_value=2, max_value=6)) if feature_dim is None: feature_dim = draw(hps.integers(min_value=2, max_value=6)) + if feature_ndims is None: + feature_ndims = draw(hps.integers(min_value=2, max_value=6)) - input_shape = batch_shape - input_shape += [example_dim] * example_ndims - input_shape += [feature_dim] * feature_ndims - # We would like kernel inputs to be unique. This is to avoid computing kernel - # matrices that are semi-definite. - return draw(hpnp.arrays( - dtype=np.float32, - shape=input_shape, - elements=hps.floats( - -100, 100, allow_nan=False, allow_infinity=False), - unique=True)) - - -def depths(): - return hps.integers(min_value=0, max_value=2) + base_kernel, kernel_variable_names = draw(kernels( + batch_shape=batch_shape, + event_dim=event_dim, + feature_dim=feature_dim, + feature_ndims=feature_ndims, + enable_vars=False, + depth=depth-1)) + scale_diag = tfp_hps.softplus_plus_eps()(draw(kernel_hps.kernel_input( + batch_shape=batch_shape, + example_ndims=0, + feature_dim=feature_dim, + feature_ndims=feature_ndims))) + + hp.note('Forming FeatureScaled kernel with scale_diag: {} '.format( + scale_diag)) + + if enable_vars and draw(hps.booleans()): + kernel_variable_names.append('scale_diag') + scale_diag = tf.Variable(scale_diag, name='scale_diag') + # Don't enable variable counting. 
This is because rescaling is + # done for each input, which will exceed two convert_to_tensor calls. + result_kernel = tfp.positive_semidefinite_kernels.FeatureScaled( + kernel=base_kernel, + scale_diag=scale_diag, + validate_args=True) + return result_kernel, kernel_variable_names @hps.composite @@ -200,7 +235,7 @@ def schur_complements( depth=depth-1)) # SchurComplement requires the inputs to have one example dimension. - fixed_inputs = draw(kernel_input( + fixed_inputs = draw(kernel_hps.kernel_input( batch_shape=batch_shape, example_ndims=1, feature_dim=feature_dim, @@ -240,6 +275,7 @@ def base_kernels( kernel_name=None, batch_shape=None, event_dim=None, + feature_dim=None, feature_ndims=None, enable_vars=False): if kernel_name is None: @@ -248,6 +284,8 @@ def base_kernels( batch_shape = draw(tfp_hps.shapes()) if event_dim is None: event_dim = draw(hps.integers(min_value=2, max_value=6)) + if feature_dim is None: + feature_dim = draw(hps.integers(min_value=2, max_value=6)) if feature_ndims is None: feature_ndims = draw(hps.integers(min_value=2, max_value=6)) @@ -318,6 +356,7 @@ def kernels( kernel_name, batch_shape=batch_shape, event_dim=event_dim, + feature_dim=feature_dim, feature_ndims=feature_ndims, enable_vars=enable_vars)) @@ -329,6 +368,15 @@ def kernels( feature_ndims=feature_ndims, enable_vars=enable_vars, depth=depth)) + elif kernel_name == 'FeatureScaled': + return draw(feature_scaleds( + batch_shape=batch_shape, + event_dim=event_dim, + feature_dim=feature_dim, + feature_ndims=feature_ndims, + enable_vars=enable_vars, + depth=depth)) + raise ValueError('Kernel name not found.') @@ -340,7 +388,7 @@ def assert_no_none_grad(kernel, method, wrt_vars, grads): @test_util.run_all_in_graph_and_eager_modes -class KernelPropertiesTest(tf.test.TestCase, parameterized.TestCase): +class KernelPropertiesTest(test_case.TestCase, parameterized.TestCase): @parameterized.parameters((bname,) for bname in TF2_FRIENDLY_KERNELS) @hp.given(hps.data()) @@ -374,7 +422,7 @@ def testKernelGradient(self, kernel_name, data): example_ndims = data.draw(hps.integers(min_value=1, max_value=3)) input_batch_shape = data.draw(tfp_hps.broadcast_compatible_shape( kernel.batch_shape)) - xs = tf.identity(data.draw(kernel_input( + xs = tf.identity(data.draw(kernel_hps.kernel_input( batch_shape=input_batch_shape, example_ndims=example_ndims, feature_dim=feature_dim, @@ -393,13 +441,14 @@ def testKernelGradient(self, kernel_name, data): CONSTRAINTS = { - 'amplitude': tfp_hps.softplus_plus_eps(), + # Keep amplitudes large enough so that the matrices are well conditioned. 
+ 'amplitude': tfp_hps.softplus_plus_eps(1.), + 'bias_variance': tfp_hps.softplus_plus_eps(1.), + 'slope_variance': tfp_hps.softplus_plus_eps(1.), + 'exponent': tfp_hps.softplus_plus_eps(), 'length_scale': tfp_hps.softplus_plus_eps(), 'period': tfp_hps.softplus_plus_eps(), 'scale_mixture_rate': tfp_hps.softplus_plus_eps(), - 'bias_variance': tfp_hps.softplus_plus_eps(), - 'slope_variance': tfp_hps.softplus_plus_eps(), - 'exponent': tfp_hps.softplus_plus_eps(), } diff --git a/tensorflow_probability/python/math/psd_kernels/rational_quadratic_test.py b/tensorflow_probability/python/math/psd_kernels/rational_quadratic_test.py index 647eafa0b2..65cb87236e 100644 --- a/tensorflow_probability/python/math/psd_kernels/rational_quadratic_test.py +++ b/tensorflow_probability/python/math/psd_kernels/rational_quadratic_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class RationalQuadraticTest(tf.test.TestCase, parameterized.TestCase): +class RationalQuadraticTest(test_case.TestCase, parameterized.TestCase): def _rational_quadratic( self, amplitude, length_scale, scale_mixture_rate, x, y): diff --git a/tensorflow_probability/python/math/psd_kernels/schur_complement_test.py b/tensorflow_probability/python/math/psd_kernels/schur_complement_test.py index 1444eca629..73c9c890be 100644 --- a/tensorflow_probability/python/math/psd_kernels/schur_complement_test.py +++ b/tensorflow_probability/python/math/psd_kernels/schur_complement_test.py @@ -27,6 +27,8 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -52,7 +54,7 @@ def _broadcast_2(s1, s2): @test_util.run_all_in_graph_and_eager_modes -class SchurComplementTest(tf.test.TestCase, parameterized.TestCase): +class SchurComplementTest(test_case.TestCase, parameterized.TestCase): def testMismatchedFloatTypesAreBad(self): base_kernel = tfp.math.psd_kernels.ExponentiatedQuadratic( diff --git a/tensorflow_probability/python/math/random_ops_test.py b/tensorflow_probability/python/math/random_ops_test.py index 525247a938..87e647216b 100644 --- a/tensorflow_probability/python/math/random_ops_test.py +++ b/tensorflow_probability/python/math/random_ops_test.py @@ -26,6 +26,7 @@ import tensorflow_probability as tfp from tensorflow_probability.python.internal import dtype_util +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -50,13 +51,13 @@ def test_expected_value(self): @test_util.run_all_in_graph_and_eager_modes -class RandomRademacherDynamic32(tf.test.TestCase, _RandomRademacher): +class RandomRademacherDynamic32(test_case.TestCase, _RandomRademacher): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class RandomRademacherDynamic64(tf.test.TestCase, _RandomRademacher): +class RandomRademacherDynamic64(test_case.TestCase, _RandomRademacher): dtype = np.float64 use_static_shape = True @@ -95,13 +96,13 @@ def test_expected_value(self): @test_util.run_all_in_graph_and_eager_modes -class RandomRayleighDynamic32(tf.test.TestCase, _RandomRayleigh): +class 
RandomRayleighDynamic32(test_case.TestCase, _RandomRayleigh): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class RandomRayleighDynamic64(tf.test.TestCase, _RandomRayleigh): +class RandomRayleighDynamic64(test_case.TestCase, _RandomRayleigh): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/math/root_search_test.py b/tensorflow_probability/python/math/root_search_test.py index 5d64c2955c..1d3d1e9b27 100644 --- a/tensorflow_probability/python/math/root_search_test.py +++ b/tensorflow_probability/python/math/root_search_test.py @@ -24,11 +24,13 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class RootSearchTest(tf.test.TestCase): +class RootSearchTest(test_case.TestCase): def test_secant_finds_all_roots_from_one_initial_position(self): f = lambda x: (63 * x**5 - 70 * x**3 + 15 * x) / 8. diff --git a/tensorflow_probability/python/math/sparse_test.py b/tensorflow_probability/python/math/sparse_test.py index 0deda4223a..aca0182155 100644 --- a/tensorflow_probability/python/math/sparse_test.py +++ b/tensorflow_probability/python/math/sparse_test.py @@ -25,23 +25,25 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -def _assert_sparse_tensor_value(test_case, expected, actual): - test_case.assertEqual(np.int64, np.array(actual.indices).dtype) - test_case.assertAllEqual(expected.indices, actual.indices) +def _assert_sparse_tensor_value(test_case_instance, expected, actual): + test_case_instance.assertEqual(np.int64, np.array(actual.indices).dtype) + test_case_instance.assertAllEqual(expected.indices, actual.indices) - test_case.assertEqual( + test_case_instance.assertEqual( np.array(expected.values).dtype, np.array(actual.values).dtype) - test_case.assertAllEqual(expected.values, actual.values) + test_case_instance.assertAllEqual(expected.values, actual.values) - test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype) - test_case.assertAllEqual(expected.dense_shape, actual.dense_shape) + test_case_instance.assertEqual(np.int64, np.array(actual.dense_shape).dtype) + test_case_instance.assertAllEqual(expected.dense_shape, actual.dense_shape) @test_util.run_all_in_graph_and_eager_modes -class SparseTest(tf.test.TestCase): +class SparseTest(test_case.TestCase): # Copied (with modifications) from: # tensorflow/contrib/layers/python/ops/sparse_ops.py. 
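One detail in the sparse_test.py hunk above deserves a note: the helper's first parameter is renamed from `test_case` to `test_case_instance`, presumably because the file now imports a module named `test_case` that the old parameter name would shadow inside the helper. In miniature, with a hypothetical helper and test:

    from tensorflow_probability.python.internal import test_case

    def _assert_pair_equal(test_case_instance, expected, actual):
      # The parameter no longer shadows the `test_case` module imported above.
      test_case_instance.assertAllEqual(expected, actual)

    class PairTest(test_case.TestCase):

      def test_pair(self):
        _assert_pair_equal(self, [1, 2], [1, 2])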
diff --git a/tensorflow_probability/python/mcmc/BUILD b/tensorflow_probability/python/mcmc/BUILD index 6770255b5c..f071b04fcc 100644 --- a/tensorflow_probability/python/mcmc/BUILD +++ b/tensorflow_probability/python/mcmc/BUILD @@ -69,6 +69,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -93,6 +94,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -124,6 +126,7 @@ py_test( # scipy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -144,6 +147,7 @@ py_library( py_library( name = "nuts_testlib", + testonly = 1, srcs = ["nuts_test.py"], srcs_version = "PY2AND3", deps = [ @@ -153,6 +157,8 @@ py_library( "//tensorflow_probability", "//tensorflow_probability/python/distributions/internal:statistical_testing", "//tensorflow_probability/python/internal:assert_util", + "//tensorflow_probability/python/internal:test_case", + "//tensorflow_probability/python/internal:test_util", ], ) @@ -165,6 +171,7 @@ py_test( tags = ["nozapfhahn"], deps = [ ":nuts_testlib", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -180,7 +187,10 @@ py_test( "nozapfhahn", "requires-gpu-sm35", ], - deps = [":nuts_testlib"], + deps = [ + ":nuts_testlib", + "//tensorflow_probability/python/internal:test_case", + ], ) py_library( @@ -207,6 +217,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -239,6 +250,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -263,6 +275,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -288,6 +301,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -311,6 +325,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -333,6 +348,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -356,6 +372,7 @@ py_test( # tensorflow dep, "//tensorflow_probability", "//tensorflow_probability/python/internal:monte_carlo", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -383,6 +400,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/mcmc/internal", ], ) @@ -409,6 +427,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -465,6 +484,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) diff --git a/tensorflow_probability/python/mcmc/diagnostic_test.py b/tensorflow_probability/python/mcmc/diagnostic_test.py index 923b6539df..2396d33718 100644 --- a/tensorflow_probability/python/mcmc/diagnostic_test.py +++ b/tensorflow_probability/python/mcmc/diagnostic_test.py @@ -19,12 +19,14 @@ from __future__ import print_function # Dependency imports 
+ import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.mcmc.diagnostic import _reduce_variance + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import from tensorflow.python.ops import spectral_ops_test_util # pylint: disable=g-direct-tensorflow-import @@ -320,7 +322,7 @@ def testInitialPositiveSuperEfficient(self): @test_util.run_all_in_graph_and_eager_modes -class EffectiveSampleSizeStaticTest(tf.test.TestCase, +class EffectiveSampleSizeStaticTest(test_case.TestCase, _EffectiveSampleSizeTest): @property @@ -329,7 +331,7 @@ def use_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class EffectiveSampleSizeDynamicTest(tf.test.TestCase, +class EffectiveSampleSizeDynamicTest(test_case.TestCase, _EffectiveSampleSizeTest): @property @@ -524,7 +526,7 @@ def testNotEnoughSamplesWithSplitChainsFailsIfValidateArgs(self): @test_util.run_all_in_graph_and_eager_modes -class PotentialScaleReductionStaticTest(tf.test.TestCase, +class PotentialScaleReductionStaticTest(test_case.TestCase, _PotentialScaleReductionTest): @property @@ -541,7 +543,7 @@ def testIndependentNdimsLessThanOneRaises(self): @test_util.run_all_in_graph_and_eager_modes -class PotentialScaleReductionDynamicTest(tf.test.TestCase, +class PotentialScaleReductionDynamicTest(test_case.TestCase, _PotentialScaleReductionTest): @property @@ -608,7 +610,7 @@ def testShape2x3x4x5Axis13BiasedFalseKeepdimsFalse(self): @test_util.run_all_in_graph_and_eager_modes -class ReduceVarianceTestStaticShape(tf.test.TestCase, _ReduceVarianceTest): +class ReduceVarianceTestStaticShape(test_case.TestCase, _ReduceVarianceTest): @property def use_static_shape(self): @@ -616,7 +618,7 @@ def use_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class ReduceVarianceTestDynamicShape(tf.test.TestCase, _ReduceVarianceTest): +class ReduceVarianceTestDynamicShape(test_case.TestCase, _ReduceVarianceTest): @property def use_static_shape(self): diff --git a/tensorflow_probability/python/mcmc/dual_averaging_step_size_adaptation_test.py b/tensorflow_probability/python/mcmc/dual_averaging_step_size_adaptation_test.py index 80d20abef0..83b2f36bbd 100644 --- a/tensorflow_probability/python/mcmc/dual_averaging_step_size_adaptation_test.py +++ b/tensorflow_probability/python/mcmc/dual_averaging_step_size_adaptation_test.py @@ -24,14 +24,15 @@ from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions _INITIAL_T = 10.0 _EXPLORATION_SHRINKAGE = 0.05 @@ -138,7 +139,7 @@ def is_calibrated(self): @test_util.run_all_in_graph_and_eager_modes -class DualAveragingStepSizeAdaptationTest(tf.test.TestCase, +class DualAveragingStepSizeAdaptationTest(test_case.TestCase, parameterized.TestCase): def testTurnOnStoreParametersInKernelResults(self): @@ -277,7 +278,7 @@ def _impl(): _impl() def testExample(self): - tf.compat.v1.random.set_random_seed(tfp_test_util.test_seed()) + 
tf1.random.set_random_seed(tfp_test_util.test_seed()) target_dist = tfd.JointDistributionSequential([ tfd.Normal(0., 1.5), tfd.Independent( @@ -311,7 +312,7 @@ def testExample(self): @test_util.run_all_in_graph_and_eager_modes class DualAveragingStepSizeAdaptationStaticBroadcastingTest( - tf.test.TestCase, + test_case.TestCase, parameterized.TestCase): use_static_shape = True @@ -356,7 +357,7 @@ def testBroadcasting(self, old_step_size, new_step_size): np.log([[0.70, 0.76, 0.73], [0.76, 0.76, 0.73]]), dtype=tf.float64) - log_accept_ratio = tf.compat.v1.placeholder_with_default( + log_accept_ratio = tf1.placeholder_with_default( input=log_accept_ratio, shape=log_accept_ratio.shape if self.use_static_shape else None) state = [ diff --git a/tensorflow_probability/python/mcmc/eight_schools_hmc.py b/tensorflow_probability/python/mcmc/eight_schools_hmc.py index 7d7a5b4341..f68046bf92 100644 --- a/tensorflow_probability/python/mcmc/eight_schools_hmc.py +++ b/tensorflow_probability/python/mcmc/eight_schools_hmc.py @@ -21,7 +21,8 @@ import time # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp tfd = tfp.distributions @@ -110,7 +111,7 @@ def computation(): # trial. is_accepted_tensor = computation() if not tf.executing_eagerly(): - session = tf.compat.v1.Session() + session = tf1.Session() session.run(is_accepted_tensor) start_time = time.time() diff --git a/tensorflow_probability/python/mcmc/eight_schools_hmc_eager_test.py b/tensorflow_probability/python/mcmc/eight_schools_hmc_eager_test.py index e925de52f2..8389669e58 100644 --- a/tensorflow_probability/python/mcmc/eight_schools_hmc_eager_test.py +++ b/tensorflow_probability/python/mcmc/eight_schools_hmc_eager_test.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.eight_schools_hmc import EightSchoolsHmcBenchmarkTestHarness -tf.compat.v1.enable_eager_execution() +tf1.enable_eager_execution() class EagerEightSchoolsHmcBenchmark( diff --git a/tensorflow_probability/python/mcmc/eight_schools_hmc_graph_test.py b/tensorflow_probability/python/mcmc/eight_schools_hmc_graph_test.py index 18b879c411..48ffaca01d 100644 --- a/tensorflow_probability/python/mcmc/eight_schools_hmc_graph_test.py +++ b/tensorflow_probability/python/mcmc/eight_schools_hmc_graph_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.eight_schools_hmc import EightSchoolsHmcBenchmarkTestHarness diff --git a/tensorflow_probability/python/mcmc/hmc.py b/tensorflow_probability/python/mcmc/hmc.py index 3b2eec5eae..fa7256f9e3 100644 --- a/tensorflow_probability/python/mcmc/hmc.py +++ b/tensorflow_probability/python/mcmc/hmc.py @@ -22,7 +22,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.mcmc import kernel as kernel_base @@ -104,7 +105,7 @@ def make_simple_step_size_update_policy(num_adaptation_steps, `step_size_var, kernel_results` and returns updated step size(s). """ if step_counter is None and num_adaptation_steps is not None: - step_counter = tf.compat.v1.get_variable( + step_counter = tf1.get_variable( name='step_size_adaptation_step_counter', initializer=np.array(-1, dtype=np.int32), # Specify the dtype for variable sharing to work correctly @@ -137,7 +138,7 @@ def step_size_simple_update_fn(step_size_var, kernel_results): kernel_results.log_accept_ratio.dtype)) log_mean_accept_ratio = tf.reduce_logsumexp( input_tensor=tf.minimum(kernel_results.log_accept_ratio, 0.)) - log_n - adjustment = tf.compat.v1.where( + adjustment = tf1.where( log_mean_accept_ratio < tf.cast( tf.math.log(target_rate), log_mean_accept_ratio.dtype), -decrement_multiplier / (1. + decrement_multiplier), @@ -297,10 +298,10 @@ def make_response_likelihood(w, x): y, x, _ = make_training_data( num_samples, dims, weights_prior_true_scale) - log_sigma = tf.compat.v2.Variable( + log_sigma = tf.Variable( name='log_sigma', initial_value=np.array(0, dtype)) - optimizer = tf.compat.v2.optimizers.SGD(learning_rate=0.01) + optimizer = tf.optimizers.SGD(learning_rate=0.01) @tf.function def mcem_iter(weights_chain_start, step_size): @@ -694,7 +695,7 @@ def _store_parameters_in_results(self): @mcmc_util.set_doc(HamiltonianMonteCarlo.one_step.__doc__) def one_step(self, current_state, previous_kernel_results): - with tf.compat.v2.name_scope( + with tf.name_scope( mcmc_util.make_name(self.name, 'hmc', 'one_step')): if self._store_parameters_in_results: step_size = previous_kernel_results.step_size @@ -758,7 +759,7 @@ def maybe_flatten(x): @mcmc_util.set_doc(HamiltonianMonteCarlo.bootstrap_results.__doc__) def bootstrap_results(self, init_state): - with tf.compat.v2.name_scope( + with tf.name_scope( mcmc_util.make_name(self.name, 'hmc', 'bootstrap_results')): if not mcmc_util.is_list_like(init_state): init_state = [init_state] @@ -873,7 +874,7 @@ def _compute_log_acceptance_correction(current_momentums, log_acceptance_correction: `Tensor` representing the `log` acceptance-correction. (See docstring for mathematical definition.) 
""" - with tf.compat.v2.name_scope(name or 'compute_log_acceptance_correction'): + with tf.name_scope(name or 'compute_log_acceptance_correction'): log_current_kinetic, log_proposed_kinetic = [], [] for current_momentum, proposed_momentum in zip( current_momentums, proposed_momentums): diff --git a/tensorflow_probability/python/mcmc/hmc_test.py b/tensorflow_probability/python/mcmc/hmc_test.py index 00165d828b..191462f1de 100644 --- a/tensorflow_probability/python/mcmc/hmc_test.py +++ b/tensorflow_probability/python/mcmc/hmc_test.py @@ -20,26 +20,27 @@ import collections import warnings + # Dependency imports + import numpy as np from scipy import stats - -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.mcmc.hmc import _compute_log_acceptance_correction from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfb = tfp.bijectors -tfd = tfp.distributions - def _set_seed(seed): """Helper which uses graph seed if using eager.""" # TODO(b/68017812): Deprecate once eager correctly supports seed. if tf.executing_eagerly(): - tf.compat.v1.set_random_seed(seed) + tf1.set_random_seed(seed) return None return seed @@ -53,13 +54,13 @@ def _reduce_variance(x, axis=None, keepdims=False): @test_util.run_all_in_graph_and_eager_modes -class HMCTest(tf.test.TestCase): +class HMCTest(test_case.TestCase): def setUp(self): self._shape_param = 5. self._rate_param = 10. - tf.compat.v1.random.set_random_seed(10003) + tf1.random.set_random_seed(10003) np.random.seed(10003) def assertAllFinite(self, x): @@ -168,9 +169,9 @@ def log_gamma_log_prob(x): actual_exp_x = np.exp(samples_).mean() acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.)) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'True E[x, exp(x)]: {}\t{}'.format(expected_x_, expected_exp_x)) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'Estimated E[x, exp(x)]: {}\t{}'.format(actual_x, actual_exp_x)) self.assertNear(actual_x, expected_x_, 2e-2) self.assertNear(actual_exp_x, expected_exp_x, 2e-2) @@ -194,7 +195,7 @@ def testHMCChainExpectations2(self): def testKernelResultsUsingTruncatedDistribution(self): def log_prob(x): - return tf.compat.v1.where( + return tf1.where( x >= 0., -x - x**2, # Non-constant gradient. tf.fill(x.shape, tf.cast(-np.inf, x.dtype))) @@ -347,15 +348,15 @@ def fake_log_prob(x): _, ks_p_value_fake = stats.ks_2samp(initial_draws_.flatten(), fake_draws_.flatten()) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'acceptance rate for true target: {}'.format(acceptance_probs.mean())) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'acceptance rate for fake target: {}'.format( bad_acceptance_probs.mean())) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'K-S p-value for true target: {}'.format(ks_p_value_true)) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'K-S p-value for fake target: {}'.format(ks_p_value_fake)) # Make sure that the MCMC update hasn't changed the empirical CDF much. 
self.assertGreater(ks_p_value_true, 1e-3) @@ -404,7 +405,7 @@ def testNanRejection(self): """ def _unbounded_exponential_log_prob(x): """An exponential distribution with log-likelihood NaN for x < 0.""" - per_element_potentials = tf.compat.v1.where( + per_element_potentials = tf1.where( x < 0., tf.fill(tf.shape(input=x), x.dtype.as_numpy_dtype(np.nan)), -x) return tf.reduce_sum(input_tensor=per_element_potentials) @@ -422,9 +423,9 @@ def _unbounded_exponential_log_prob(x): [initial_x, updated_x, kernel_results.log_accept_ratio]) acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.)) - tf.compat.v1.logging.vlog(1, 'initial_x = {}'.format(initial_x_)) - tf.compat.v1.logging.vlog(1, 'updated_x = {}'.format(updated_x_)) - tf.compat.v1.logging.vlog(1, + tf1.logging.vlog(1, 'initial_x = {}'.format(initial_x_)) + tf1.logging.vlog(1, 'updated_x = {}'.format(updated_x_)) + tf1.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_)) self.assertAllEqual(initial_x_, updated_x_) @@ -432,7 +433,7 @@ def _unbounded_exponential_log_prob(x): def testNanFromGradsDontPropagate(self): """Test that update with NaN gradients does not cause NaN in results.""" - if tf.compat.v1.control_flow_v2_enabled(): + if tf1.control_flow_v2_enabled(): self.skipTest('b/138796859') if tf.executing_eagerly(): return def _nan_log_prob_with_nan_gradient(x): @@ -451,9 +452,9 @@ def _nan_log_prob_with_nan_gradient(x): [initial_x, updated_x, kernel_results.log_accept_ratio]) acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.)) - tf.compat.v1.logging.vlog(1, 'initial_x = {}'.format(initial_x_)) - tf.compat.v1.logging.vlog(1, 'updated_x = {}'.format(updated_x_)) - tf.compat.v1.logging.vlog(1, + tf1.logging.vlog(1, 'initial_x = {}'.format(initial_x_)) + tf1.logging.vlog(1, 'updated_x = {}'.format(updated_x_)) + tf1.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_)) self.assertAllEqual(initial_x_, updated_x_) @@ -608,7 +609,7 @@ def testAdaptiveParameters(self): self.assertAllEqual([True, True], r2_.is_accepted) def testAdaptiveIncompatibleWithStepSizeAdaptation(self): - step_size = tf.compat.v2.Variable( + step_size = tf.Variable( initial_value=1., name='step_size', trainable=False) @@ -628,8 +629,8 @@ def testWarnMutableParameters(self): with warnings.catch_warnings(record=True) as triggered: tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=lambda x: -x**2., - num_leapfrog_steps=tf.compat.v2.Variable(2.), - step_size=tf.compat.v2.Variable(0.1), + num_leapfrog_steps=tf.Variable(2.), + step_size=tf.Variable(0.1), store_parameters_in_results=False) self.assertTrue( any('Please consult the docstring' in str(warning.message) @@ -735,17 +736,17 @@ def testHandlesNanFromKinetic(self): @test_util.run_all_in_graph_and_eager_modes -class LogCorrectionTest16(tf.test.TestCase, _LogCorrectionTest): +class LogCorrectionTest16(test_case.TestCase, _LogCorrectionTest): dtype = np.float16 @test_util.run_all_in_graph_and_eager_modes -class LogCorrectionTest32(tf.test.TestCase, _LogCorrectionTest): +class LogCorrectionTest32(test_case.TestCase, _LogCorrectionTest): dtype = np.float32 @test_util.run_all_in_graph_and_eager_modes -class LogCorrectionTest64(tf.test.TestCase, _LogCorrectionTest): +class LogCorrectionTest64(test_case.TestCase, _LogCorrectionTest): dtype = np.float64 @@ -801,20 +802,20 @@ def target_log_prob(x, y): @test_util.run_all_in_graph_and_eager_modes -class HMCHandlesLists32(_HMCHandlesLists, tf.test.TestCase): +class HMCHandlesLists32(_HMCHandlesLists, test_case.TestCase): dtype = np.float32 
@test_util.run_all_in_graph_and_eager_modes -class HMCHandlesLists64(_HMCHandlesLists, tf.test.TestCase): +class HMCHandlesLists64(_HMCHandlesLists, test_case.TestCase): dtype = np.float64 @test_util.run_all_in_graph_and_eager_modes -class HMCAdaptiveStepSize(tf.test.TestCase): +class HMCAdaptiveStepSize(test_case.TestCase): def setUp(self): - tf.compat.v1.random.set_random_seed(10014) + tf1.random.set_random_seed(10014) np.random.seed(10014) def test_multiple_step_sizes_different_ranks(self): @@ -844,7 +845,7 @@ def target_log_prob_fn(x1, x2): seed=_set_seed(252)), parallel_iterations=1) - init_op = tf.compat.v1.global_variables_initializer() + init_op = tf1.global_variables_initializer() self.evaluate(init_op) _ = self.evaluate(samples) @@ -857,10 +858,10 @@ def test_multiple_step_sizes_different_dtype(self): dtype = np.float64 step_size = [ - tf.compat.v2.Variable(initial_value=np.array(initial_step_size, dtype), + tf.Variable(initial_value=np.array(initial_step_size, dtype), name='step_size', trainable=False) for initial_step_size in initial_step_sizes] - step_counter = tf.compat.v2.Variable( + step_counter = tf.Variable( name='step_size_adaptation_step_counter1', initial_value=np.array(-1, dtype=np.int32), trainable=False) @@ -884,7 +885,7 @@ def target_log_prob_fn(x1, x2): seed=_set_seed(252)), parallel_iterations=1) - init_op = tf.compat.v1.global_variables_initializer() + init_op = tf1.global_variables_initializer() self.evaluate(init_op) step_size_ = self.evaluate(kernel_results.extra.step_size_assign) @@ -905,11 +906,11 @@ def test_finite_adaptation(self): initial_step_size = 1e-5 dtype = np.float32 - step_size = tf.compat.v2.Variable( + step_size = tf.Variable( initial_value=np.array(initial_step_size, dtype), name='step_size', trainable=False) - step_counter = tf.compat.v2.Variable( + step_counter = tf.Variable( name='step_size_adaptation_step_counter2', initial_value=np.array(-1, dtype=np.int32), trainable=False) @@ -931,7 +932,7 @@ def test_finite_adaptation(self): seed=_set_seed(252)), parallel_iterations=1) - init_op = tf.compat.v1.global_variables_initializer() + init_op = tf1.global_variables_initializer() self.evaluate(init_op) [_, step_size_] = self.evaluate([ @@ -949,17 +950,17 @@ def test_finite_adaptation(self): def test_reuse_step_counter(self): for _ in range(2): - with tf.compat.v1.variable_scope( - tf.compat.v1.get_variable_scope(), reuse=tf.compat.v1.AUTO_REUSE): + with tf1.variable_scope( + tf1.get_variable_scope(), reuse=tf1.AUTO_REUSE): tfp.mcmc.make_simple_step_size_update_policy(num_adaptation_steps=1) @test_util.run_all_in_graph_and_eager_modes -class HMCEMAdaptiveStepSize(tf.test.TestCase): +class HMCEMAdaptiveStepSize(test_case.TestCase): """This test verifies that the docstring example works as advertised.""" def setUp(self): - tf.compat.v1.random.set_random_seed(10014) + tf1.random.set_random_seed(10014) np.random.seed(10014) def make_training_data(self, num_samples, dims, sigma): @@ -991,12 +992,12 @@ def test_mcem_converges(self): weights_prior_true_scale = np.array(0.3, dtype) y, x, w0 = self.make_training_data(num_samples, dims, weights_prior_true_scale) - tf.compat.v1.logging.vlog(1, 'w0: %s', w0) + tf1.logging.vlog(1, 'w0: %s', w0) - log_sigma = tf.compat.v2.Variable( + log_sigma = tf.Variable( name='log_sigma', initial_value=np.array(0, dtype)) - optimizer = tf.compat.v2.optimizers.SGD(learning_rate=0.01) + optimizer = tf.optimizers.SGD(learning_rate=0.01) def mcem_iter(weights_chain_start, step_size): with tf.GradientTape() as tape: @@ -1050,7 
+1051,7 @@ def trace_fn(_, pkr): if not tf.executing_eagerly(): # To create the variables. mcem_iter(np.zeros(dims, dtype), 0.) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) num_iters = int(40) @@ -1075,7 +1076,7 @@ def trace_fn(_, pkr): # bazel test --test_output=streamed -c opt :hmc_test \ # --test_filter=HMCEMAdaptiveStepSize \ # --test_arg="--logtostderr" --test_arg="--vmodule=hmc_test=2" - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, ('iter:{:>2} loss:{: 9.3f} scale:{:.3f} ' 'step_size:{:.4f} avg_acceptance_ratio:{:.4f}').format( iter_, loss_[iter_], weights_prior_estimated_scale_[iter_], @@ -1126,5 +1127,5 @@ def trace_fn(_, pkr): if __name__ == '__main__': # TODO(b/138844773): Enable this. - tf.compat.v1.disable_control_flow_v2() + tf1.disable_control_flow_v2() tf.test.main() diff --git a/tensorflow_probability/python/mcmc/internal/BUILD b/tensorflow_probability/python/mcmc/internal/BUILD index 0beab43406..f00eb28582 100644 --- a/tensorflow_probability/python/mcmc/internal/BUILD +++ b/tensorflow_probability/python/mcmc/internal/BUILD @@ -66,6 +66,7 @@ py_test( # absl/testing:parameterized dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -88,5 +89,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py b/tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py index a2c09afd8e..9bff4eba2c 100644 --- a/tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py +++ b/tensorflow_probability/python/mcmc/internal/leapfrog_integrator_test.py @@ -19,20 +19,22 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.mcmc.internal import leapfrog_integrator as leapfrog_impl from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class LeapfrogIntegratorTest(tf.test.TestCase): +class LeapfrogIntegratorTest(test_case.TestCase): def setUp(self): self._shape_param = 5. self._rate_param = 10. - tf.compat.v1.random.set_random_seed(10003) + tf1.random.set_random_seed(10003) np.random.seed(10003) def assertAllFinite(self, x): @@ -76,7 +78,7 @@ def _integrator_conserves_energy(self, x, independent_chain_ndims): input_tensor=new_m**2., axis=event_dims) old_energy_, new_energy_ = self.evaluate([old_energy, new_energy]) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, 'average energy relative change: {}'.format( (1. 
- new_energy_ / old_energy_).mean())) self.assertAllClose(old_energy_, new_energy_, atol=0., rtol=0.02) diff --git a/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py b/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py index 658f2d8058..cdd0584581 100644 --- a/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py +++ b/tensorflow_probability/python/mcmc/internal/slice_sampler_utils.py @@ -18,7 +18,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions.bernoulli import Bernoulli from tensorflow_probability.python.util.seed_stream import SeedStream @@ -61,7 +62,7 @@ def _left_doubling_increments(batch_shape, max_doublings, step_size, seed=None, widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The widths of the intervals at each stage of the doubling. """ - with tf.compat.v1.name_scope(name, 'left_doubling_increments', + with tf1.name_scope(name, 'left_doubling_increments', [batch_shape, max_doublings, step_size]): step_size = tf.convert_to_tensor(value=step_size) @@ -115,7 +116,7 @@ def _find_best_interval_idx(x, name=None): first set of bounds outside the slice and if there are none, the index of the widest set. """ - with tf.compat.v1.name_scope(name, 'find_best_interval_idx', [x]): + with tf1.name_scope(name, 'find_best_interval_idx', [x]): # Returns max_doublings + 1. Positive int32. k = tf.shape(input=x)[0] dtype = x.dtype.base_dtype @@ -176,7 +177,7 @@ def slice_bounds_by_doubling(x_initial, No. 3 , 705-767. https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461 """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'slice_bounds_by_doubling', [x_initial, log_slice_heights, max_doublings, step_size]): seed_gen = SeedStream(seed, salt='slice_bounds_by_doubling') @@ -261,7 +262,7 @@ def _test_acceptance(x_initial, target_log_prob, decided, log_slice_heights, acceptable: A boolean tensor of same shape as `x_initial` indicating whether the proposed points are acceptable for reversibility or not. """ - with tf.compat.v1.name_scope(name, 'test_acceptance', [ + with tf1.name_scope(name, 'test_acceptance', [ x_initial, decided, log_slice_heights, x_proposed, step_size, lower_bounds, upper_bounds ]): @@ -280,8 +281,8 @@ def body(acceptable, decided, left, right, d): divided = (((x_initial < midpoint) & (x_proposed >= midpoint)) | ((x_proposed < midpoint) & (x_initial >= midpoint))) next_d = d | divided - next_right = tf.compat.v1.where(x_proposed < midpoint, midpoint, right) - next_left = tf.compat.v1.where(x_proposed >= midpoint, midpoint, left) + next_right = tf1.where(x_proposed < midpoint, midpoint, right) + next_left = tf1.where(x_proposed >= midpoint, midpoint, left) left_test = (log_slice_heights >= target_log_prob(next_left)) right_test = (log_slice_heights >= target_log_prob(next_right)) unacceptable = next_d & left_test & right_test @@ -289,7 +290,7 @@ def body(acceptable, decided, left, right, d): # and are unacceptable, set acceptable to False. For others, let them # be as they were. 
now_decided = ~decided & unacceptable - next_acceptable = tf.compat.v1.where(now_decided, ~unacceptable, + next_acceptable = tf1.where(now_decided, ~unacceptable, acceptable) # Decided if (a) was already decided, or # (b) the new width is less than 1.1 step_size, or @@ -341,7 +342,7 @@ def _sample_with_shrinkage(x_initial, target_log_prob, log_slice_heights, x_proposed: A tensor of the same shape and dtype as `x_initial`. The next proposed state of the chain. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'sample_with_shrinkage', [x_initial, log_slice_heights, step_size, lower_bounds, upper_bounds]): seed_gen = SeedStream(seed, salt='_sample_with_shrinkage') @@ -355,7 +356,7 @@ def _body(found, left, right, x_next): """Iterates until every chain has found a suitable next state.""" proportions = tf.random.uniform( x_initial_shape, dtype=x_initial_dtype, seed=seed_gen()) - x_proposed = tf.compat.v1.where(~found, + x_proposed = tf1.where(~found, left + proportions * (right - left), x_next) accept_res = _test_acceptance(x_initial, target_log_prob=target_log_prob, @@ -371,8 +372,8 @@ def _body(found, left, right, x_next): # algorithm in Neal). However, this does not matter because the endpoints # for points that have been already accepted are not used again so it # doesn't matter what we do with them. - next_left = tf.compat.v1.where(x_proposed < x_initial, x_proposed, left) - next_right = tf.compat.v1.where(x_proposed >= x_initial, x_proposed, + next_left = tf1.where(x_proposed < x_initial, x_proposed, left) + next_right = tf1.where(x_proposed >= x_initial, x_proposed, right) return next_found, next_left, next_right, x_proposed @@ -415,7 +416,7 @@ def slice_sampler_one_dim(target_log_prob, x_initial, step_size=0.01, lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower bounds for the slice found. """ - with tf.compat.v1.name_scope(name, 'slice_sampler_one_dim', + with tf1.name_scope(name, 'slice_sampler_one_dim', [x_initial, step_size, max_doublings]): x_initial = tf.convert_to_tensor(value=x_initial) # Obtain the input dtype of the array. 
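The hunks above all follow one mechanical recipe, repeated for every module in `tfp.mcmc`: the single `import tensorflow as tf` is split into two aliases, after which `tf.compat.v1.foo` is spelled `tf1.foo` and `tf.compat.v2.bar` simply `tf.bar`. A minimal sketch of the convention (the `scaled_sum` helper below is invented for illustration and is not part of the patch):

```python
import tensorflow.compat.v1 as tf1  # v1 surface: name_scope(name, default_name, values), where, ...
import tensorflow.compat.v2 as tf   # v2 surface: tf.Variable, tf.name_scope(name), tf.math, ...

def scaled_sum(x, name=None):
  # Was: tf.compat.v1.name_scope(...). The three-argument v1 form survives
  # the rename unchanged; only the spelling shortens.
  with tf1.name_scope(name, 'scaled_sum', [x]):
    x = tf.convert_to_tensor(value=x)
    # tf1.where keeps the v1 semantics (no broadcasting of the condition
    # against the branches), so the mechanical rename does not change
    # behavior.
    x = tf1.where(tf.math.is_finite(x), x, tf.zeros_like(x))
    return tf.reduce_sum(input_tensor=x)
```

Keeping `tf` bound to the v2 API leaves every remaining v1 call site greppable as `tf1.`, which is presumably the point: a later cleanup pass only has to touch lines mentioning `tf1`.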
diff --git a/tensorflow_probability/python/mcmc/internal/util.py b/tensorflow_probability/python/mcmc/internal/util.py index 48e262f32d..dc201e40c3 100644 --- a/tensorflow_probability/python/mcmc/internal/util.py +++ b/tensorflow_probability/python/mcmc/internal/util.py @@ -23,7 +23,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.math.gradient import value_and_gradient as tfp_math_value_and_gradients @@ -64,7 +65,7 @@ def is_namedtuple_like(x): def make_name(super_name, default_super_name, sub_name): - """Helper which makes a `str` name; useful for tf.compat.v1.name_scope.""" + """Helper which makes a `str` name; useful for tf1.name_scope.""" name = super_name if super_name is not None else default_super_name if sub_name is not None: name += '_' + sub_name @@ -78,7 +79,7 @@ def _choose_base_case(is_accepted, """Helper to `choose` which expand_dims `is_accepted` and applies tf.where.""" def _expand_is_accepted_like(x): """Helper to expand `is_accepted` like the shape of some input arg.""" - with tf.compat.v1.name_scope('expand_is_accepted_like'): + with tf1.name_scope('expand_is_accepted_like'): expand_shape = tf.concat([ tf.shape(input=is_accepted), tf.ones([tf.rank(x) - tf.rank(is_accepted)], dtype=tf.int32), @@ -98,12 +99,12 @@ def _where(accepted, rejected): return accepted accepted = tf.convert_to_tensor(value=accepted, name='accepted') rejected = tf.convert_to_tensor(value=rejected, name='rejected') - r = tf.compat.v1.where( + r = tf1.where( _expand_is_accepted_like(accepted), accepted, rejected) r.set_shape(r.shape.merge_with(accepted.shape.merge_with(rejected.shape))) return r - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'choose', values=[is_accepted, accepted, rejected]): if not is_list_like(accepted): return _where(accepted, rejected) @@ -151,7 +152,7 @@ def safe_sum(x, alt_value=-np.inf, name=None): TypeError: if `x` is not list-like. ValueError: if `x` is empty. """ - with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]): + with tf1.name_scope(name, 'safe_sum', [x, alt_value]): if not is_list_like(x): raise TypeError('Expected list input.') if not x: @@ -161,7 +162,7 @@ def safe_sum(x, alt_value=-np.inf, name=None): x = tf.reduce_sum(input_tensor=x, axis=-1) alt_value = np.array(alt_value, x.dtype.as_numpy_dtype) alt_fill = tf.fill(tf.shape(input=x), value=alt_value) - x = tf.compat.v1.where(tf.math.is_finite(x), x, alt_fill) + x = tf1.where(tf.math.is_finite(x), x, alt_fill) x.set_shape(x.shape.merge_with(in_shape)) return x @@ -176,7 +177,7 @@ def _doc(func): def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None): """Helper to `maybe_call_fn_and_grads`.""" - with tf.compat.v1.name_scope(name, 'value_and_gradients', + with tf1.name_scope(name, 'value_and_gradients', [fn_arg_list, result, grads]): def _convert_to_tensor(x, name): @@ -226,7 +227,7 @@ def maybe_call_fn_and_grads(fn, check_non_none_grads=True, name=None): """Calls `fn` and computes the gradient of the result wrt `args_list`.""" - with tf.compat.v1.name_scope(name, 'maybe_call_fn_and_grads', + with tf1.name_scope(name, 'maybe_call_fn_and_grads', [fn_arg_list, result, grads]): fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list) else [fn_arg_list]) @@ -270,12 +271,12 @@ def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars, Returns: result: `Tensor` representing applying `body_fn` iteratively `n` times. 
""" - with tf.compat.v1.name_scope(name, 'smart_for_loop', + with tf1.name_scope(name, 'smart_for_loop', [loop_num_iter, initial_loop_vars]): loop_num_iter_ = tf.get_static_value(loop_num_iter) if (loop_num_iter_ is None or tf.executing_eagerly() or control_flow_util.GraphOrParentsInXlaContext( - tf.compat.v1.get_default_graph())): + tf1.get_default_graph())): # Cast to int32 to run the comparison against i in host memory, # where while/LoopCond needs it. loop_num_iter = tf.cast(loop_num_iter, dtype=tf.int32) @@ -326,9 +327,9 @@ def trace_scan(loop_fn, `Tensor` being a stack of the corresponding `Tensors` in the return value of `trace_fn` for each slice of `elems`. """ - with tf.compat.v1.name_scope( - name, 'trace_scan', [initial_state, elems]), tf.compat.v1.variable_scope( - tf.compat.v1.get_variable_scope()) as vs: + with tf1.name_scope( + name, 'trace_scan', [initial_state, elems]), tf1.variable_scope( + tf1.get_variable_scope()) as vs: if vs.caching_device is None and not tf.executing_eagerly(): vs.set_caching_device(lambda op: op.device) diff --git a/tensorflow_probability/python/mcmc/internal/util_test.py b/tensorflow_probability/python/mcmc/internal/util_test.py index ef7cfb1460..4be38bcd10 100644 --- a/tensorflow_probability/python/mcmc/internal/util_test.py +++ b/tensorflow_probability/python/mcmc/internal/util_test.py @@ -20,22 +20,22 @@ import collections import warnings + # Dependency imports from absl.testing import parameterized import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp - +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.mcmc.internal import util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - @test_util.run_all_in_graph_and_eager_modes -class ChooseTest(tf.test.TestCase): +class ChooseTest(test_case.TestCase): def test_works_for_nested_namedtuple(self): Results = collections.namedtuple('Results', ['field1', 'inner']) # pylint: disable=invalid-name @@ -94,7 +94,7 @@ def test_selects_batch_members_from_list_of_arrays(self): self.assertAllEqual(expected, chosen_) -class IsNamedTupleLikeTest(tf.test.TestCase): +class IsNamedTupleLikeTest(test_case.TestCase): def test_true_for_namedtuple_without_fields(self): NoFields = collections.namedtuple('NoFields', []) # pylint: disable=invalid-name @@ -114,7 +114,7 @@ def test_false_for_base_case(self): self.assertFalse(util.is_namedtuple_like(np.int32())) -class GradientTest(tf.test.TestCase): +class GradientTest(test_case.TestCase): def testGradientComputesCorrectly(self): dtype = np.float32 @@ -160,7 +160,7 @@ def fn(x, y): @test_util.run_all_in_graph_and_eager_modes -class SmartForLoopTest(tf.test.TestCase): +class SmartForLoopTest(test_case.TestCase): def test_python_for_loop(self): counter = None @@ -181,7 +181,7 @@ def body(x): def test_tf_while_loop(self): iters = 10 - n = tf.compat.v1.placeholder_with_default(input=np.int64(iters), shape=()) + n = tf1.placeholder_with_default(input=np.int64(iters), shape=()) counter = collections.Counter() def body(x): counter['body_calls'] += 1 @@ -195,7 +195,7 @@ def body(x): @test_util.run_all_in_graph_and_eager_modes -class TraceScanTest(tf.test.TestCase): +class TraceScanTest(test_case.TestCase): def testBasic(self): @@ -228,7 +228,7 @@ def _test_setter_fn(simple_results, increment=1): return 
simple_results._replace(value=simple_results.value + increment) -class MakeInnermostSetterTest(tf.test.TestCase): +class MakeInnermostSetterTest(test_case.TestCase): def testNoWrapper(self): results = SimpleResults(1) @@ -262,7 +262,7 @@ def testTwoWrappers(self): self.assertEqual(2, new_results.inner_results.inner_results.value) -class MakeInnermostGetterTest(tf.test.TestCase): +class MakeInnermostGetterTest(test_case.TestCase): def testNoWrapper(self): results = SimpleResults(1) @@ -323,7 +323,7 @@ class FakeInnerNoParameters(object): pass -class EnableStoreParametersInResultsTest(tf.test.TestCase, +class EnableStoreParametersInResultsTest(test_case.TestCase, parameterized.TestCase): @parameterized.parameters(FakeInnerOld(), @@ -375,12 +375,12 @@ class TensorConvertible(object): TensorConvertible, conversion_func=lambda *args: tf.constant(0)) -class SimpleTensorWarningTest(tf.test.TestCase, parameterized.TestCase): +class SimpleTensorWarningTest(test_case.TestCase, parameterized.TestCase): # We must defer creating the TF objects until the body of the test. # pylint: disable=unnecessary-lambda @parameterized.parameters([lambda: tf.Variable(0)], - [lambda: tf.compat.v2.Variable(0)], + [lambda: tf.Variable(0)], [lambda: TensorConvertible()]) def testWarn(self, tensor_callable): tensor = tensor_callable() diff --git a/tensorflow_probability/python/mcmc/langevin.py b/tensorflow_probability/python/mcmc/langevin.py index f9ba5fe647..633eea7199 100644 --- a/tensorflow_probability/python/mcmc/langevin.py +++ b/tensorflow_probability/python/mcmc/langevin.py @@ -21,7 +21,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.math import diag_jacobian @@ -481,7 +482,7 @@ def is_calibrated(self): @mcmc_util.set_doc(MetropolisAdjustedLangevinAlgorithm.one_step.__doc__) def one_step(self, current_state, previous_kernel_results): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'mala', 'one_step'), values=[ self.step_size, current_state, @@ -490,7 +491,7 @@ def one_step(self, current_state, previous_kernel_results): previous_kernel_results.volatility, previous_kernel_results.diffusion_drift ]): - with tf.compat.v1.name_scope('initialize'): + with tf1.name_scope('initialize'): # Prepare input arguments to be passed to `_euler_method`. [ current_state_parts, @@ -584,7 +585,7 @@ def maybe_flatten(x): @mcmc_util.set_doc( MetropolisAdjustedLangevinAlgorithm.bootstrap_results.__doc__) def bootstrap_results(self, init_state): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'mala', 'bootstrap_results'), values=[init_state]): init_state_parts = (list(init_state) @@ -669,7 +670,7 @@ def _euler_method(random_draw_parts, state(s) of the Markov chain(s) at each result step. Has same shape as input `current_state_parts`. """ - with tf.compat.v1.name_scope(name, 'mala_euler_method', [ + with tf1.name_scope(name, 'mala_euler_method', [ random_draw_parts, state_parts, drift_parts, step_size_parts, volatility_parts ]): @@ -724,7 +725,7 @@ def _get_drift(step_size_parts, volatility_parts, grads_volatility, input `current_state_parts`. 
""" - with tf.compat.v1.name_scope(name, 'mala_get_drift', [ + with tf1.name_scope(name, 'mala_get_drift', [ step_size_parts, volatility_parts, grads_volatility, grads_target_log_prob ]): @@ -803,7 +804,7 @@ def _compute_log_acceptance_correction(current_state_parts, acceptance-correction. (See docstring for mathematical definition.) """ - with tf.compat.v1.name_scope(name, 'compute_log_acceptance_correction', [ + with tf1.name_scope(name, 'compute_log_acceptance_correction', [ current_state_parts, proposed_state_parts, current_volatility_parts, proposed_volatility_parts, current_drift_parts, proposed_drift_parts, step_size_parts, independent_chain_ndims diff --git a/tensorflow_probability/python/mcmc/langevin_test.py b/tensorflow_probability/python/mcmc/langevin_test.py index 0748461ffd..cc53a9c768 100644 --- a/tensorflow_probability/python/mcmc/langevin_test.py +++ b/tensorflow_probability/python/mcmc/langevin_test.py @@ -19,18 +19,18 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - @test_util.run_all_in_graph_and_eager_modes -class LangevinTest(tf.test.TestCase): +class LangevinTest(test_case.TestCase): def testLangevin1DNormal(self): """Sampling from the Standard Normal Distribution.""" diff --git a/tensorflow_probability/python/mcmc/metropolis_hastings.py b/tensorflow_probability/python/mcmc/metropolis_hastings.py index dc65fb86da..9aef9eb4f7 100644 --- a/tensorflow_probability/python/mcmc/metropolis_hastings.py +++ b/tensorflow_probability/python/mcmc/metropolis_hastings.py @@ -21,7 +21,8 @@ import collections import warnings -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc import kernel as kernel_base from tensorflow_probability.python.mcmc.internal import util as mcmc_util @@ -181,7 +182,7 @@ def one_step(self, current_state, previous_kernel_results): ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'one_step'), values=[current_state, previous_kernel_results]): # Take one inner step. @@ -258,7 +259,7 @@ def bootstrap_results(self, init_state): ValueError: if `inner_kernel` results doesn't contain the member "target_log_prob". 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'mh', 'bootstrap_results'), values=[init_state]): pkr = self.inner_kernel.bootstrap_results(init_state) diff --git a/tensorflow_probability/python/mcmc/metropolis_hastings_test.py b/tensorflow_probability/python/mcmc/metropolis_hastings_test.py index ec2e6e2634..da1a1c0708 100644 --- a/tensorflow_probability/python/mcmc/metropolis_hastings_test.py +++ b/tensorflow_probability/python/mcmc/metropolis_hastings_test.py @@ -23,14 +23,15 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.mcmc.internal.util import is_list_like - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import InnerKernelResultsWithoutCorrection = collections.namedtuple( + 'InnerKernelResultsWithoutCorrection', [ 'target_log_prob', # For "next_state". @@ -130,7 +131,7 @@ def bootstrap_results(_): @test_util.run_all_in_graph_and_eager_modes -class MetropolisHastingsTest(tf.test.TestCase): +class MetropolisHastingsTest(test_case.TestCase): def setUp(self): self.dtype = np.float32 diff --git a/tensorflow_probability/python/mcmc/nuts.py b/tensorflow_probability/python/mcmc/nuts.py index f597d55fce..e345ae1716 100644 --- a/tensorflow_probability/python/mcmc/nuts.py +++ b/tensorflow_probability/python/mcmc/nuts.py @@ -61,7 +61,7 @@ # Whether to use U turn criteria in [1] or generalized U turn criteria in [2] # to check the tree trajectory. -GENERALIZED_UTURN = True # Default: True +GENERALIZED_UTURN = False # Default: True ############################################################## ### END STATIC CONFIGURATION ################################# ############################################################## diff --git a/tensorflow_probability/python/mcmc/nuts_test.py b/tensorflow_probability/python/mcmc/nuts_test.py index b311450306..816e15216b 100644 --- a/tensorflow_probability/python/mcmc/nuts_test.py +++ b/tensorflow_probability/python/mcmc/nuts_test.py @@ -18,21 +18,21 @@ from __future__ import division from __future__ import print_function - # Dependency imports + from absl.testing import parameterized import numpy as np - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.distributions.internal import statistical_testing as st from tensorflow_probability.python.internal import assert_util -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python.internal import test_case +from tensorflow_probability.python.internal import test_util as tfp_test_util -tfb = tfp.bijectors -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @tf.function(autograph=False) @@ -74,7 +74,7 @@ def assert_univariate_target_conservation(test, target_d, step_size): # the sample count. 
num_samples = int(5e4) num_steps = 1 - strm = tfp.util.SeedStream(salt='univariate_nuts_test', seed=1) + strm = tfp_test_util.test_seed_stream() initialization = target_d.sample([num_samples], seed=strm()) @tf.function(autograph=False) @@ -140,7 +140,7 @@ def assert_mvn_target_conservation(event_size, batch_size, **kwargs): @test_util.run_all_in_graph_and_eager_modes -class NutsTest(parameterized.TestCase, tf.test.TestCase): +class NutsTest(parameterized.TestCase, test_case.TestCase): def testUnivariateNormalTargetConservation(self): normal_dist = tfd.Normal(loc=1., scale=2.) @@ -175,7 +175,7 @@ def testMultivariateNormalNd(self, event_size, batch_size): ([2, 5], 100), # test rank 2 case ) def testLatentsOfMixedRank(self, batch_shape, num_steps): - strm = tfp.util.SeedStream(5, salt='LatentsOfMixedRankTest') + strm = tfp_test_util.test_seed_stream() init0 = [tf.ones(batch_shape + [6])] init1 = [tf.ones(batch_shape + []), @@ -231,7 +231,7 @@ def log_prob1(state0, state1, state2): # (500, 1000, 20), ) def testMultivariateNormalNdConvergence(self, nsamples, nchains, nd): - strm = tfp.util.SeedStream(1, salt='MultivariateNormalNdConvergence') + strm = tfp_test_util.test_seed_stream() theta0 = np.zeros((nchains, nd)) mu = np.arange(nd) w = np.random.randn(nd, nd) * 0.1 @@ -239,7 +239,7 @@ def testMultivariateNormalNdConvergence(self, nsamples, nchains, nd): step_size = np.random.rand(nchains, 1) * 0.1 + 1. @tf.function(autograph=False) - def run_nuts(mu, scale_tril, step_size, nsamples, state): + def run_chain_and_get_summary(mu, scale_tril, step_size, nsamples, state): def target_log_prob_fn(event): with tf.name_scope('nuts_test_target_log_prob'): return tfd.MultivariateNormalTriL( @@ -273,7 +273,8 @@ def trace_fn(_, pkr): leapfrogs_taken_[is_accepted[1:]]) sample_shape, sample_mean, sample_cov, leapfrogs_taken = self.evaluate( - run_nuts(mu, np.linalg.cholesky(cov), step_size, nsamples, theta0)) + run_chain_and_get_summary( + mu, np.linalg.cholesky(cov), step_size, nsamples, theta0)) self.assertAllEqual(sample_shape, [nsamples, nchains, nd]) self.assertAllClose(mu, sample_mean, atol=0.1, rtol=0.1) @@ -282,6 +283,54 @@ def trace_fn(_, pkr): self.assertTrue( np.any(np.isin(np.asarray([5, 9, 11, 13]), np.unique(leapfrogs_taken)))) + def testCorrelated2dNormalwithinMCError(self): + strm = tfp_test_util.test_seed_stream() + nchains, num_steps = 10, 500 + mu = np.asarray([0., 3.], dtype=np.float32) + rho = 0.75 + sigma1 = 1. + sigma2 = 2. 
+ cov = np.asarray([[sigma1 * sigma1, rho * sigma1 * sigma2], + [rho * sigma1 * sigma2, sigma2 * sigma2]], + dtype=np.float32) + true_param = np.hstack([mu, np.array([sigma1**2, sigma2**2, rho])]) + scale_tril = np.linalg.cholesky(cov) + initial_state = np.zeros((nchains, 2), np.float32) + + @tf.function(autograph=False) + def run_chain_and_get_estimation_error(): + chain_state = tfp.mcmc.sample_chain( + num_results=num_steps, + num_burnin_steps=0, + current_state=initial_state, + kernel=tfp.mcmc.NoUTurnSampler( + tfd.MultivariateNormalTriL(loc=mu, + scale_tril=scale_tril).log_prob, + step_size=1., + seed=strm()), + trace_fn=None) + variance_est = tf.square(chain_state - mu) + correlation_est = ((chain_state[..., 0] - mu[0]) * + (chain_state[..., 1] - mu[1]) / + (sigma1 * sigma2))[..., tf.newaxis] + mcmc_samples = tf.concat([chain_state, variance_est, correlation_est], + axis=-1) + + expected = tf.reduce_mean(mcmc_samples, axis=[0, 1]) + + ess = tf.reduce_sum(tfp.mcmc.effective_sample_size(mcmc_samples), axis=0) + avg_monte_carlo_standard_error = tf.reduce_mean( + tf.math.reduce_std(mcmc_samples, axis=0), + axis=0) / tf.sqrt(ess) + scaled_error = ( + tf.abs(expected - true_param) / avg_monte_carlo_standard_error) + + return tfd.Normal(loc=0., scale=1.).prob(scaled_error) + + # Probability of getting this error + error_prob = self.evaluate(run_chain_and_get_estimation_error()) + self.assertAllGreater(error_prob, 0.01) + @parameterized.parameters( (7, 5, 3, None), (7, 5, 1, tf.TensorShape([None, 1])), @@ -315,7 +364,7 @@ def testDivergence(self): ) @tf.function(autograph=False) - def run_chain(): + def run_chain_and_get_divergence(): nchains = 5 init_states = neals_funnel.sample(nchains, seed=strm()) _, has_divergence = tfp.mcmc.sample_chain( num_results=100, kernel=nuts, current_state=init_states, seed=strm(), trace_fn=lambda _, pkr: pkr.has_divergence) return tf.reduce_sum(tf.cast(has_divergence, dtype=tf.int32)) - divergence_count = self.evaluate(run_chain()) + divergence_count = self.evaluate(run_chain_and_get_divergence()) # Test that we observe a fair amount of divergence. self.assertAllGreater(divergence_count, 100) def testSampleEndtoEnd(self): """An end-to-end test of sampling using NUTS.""" - strm = tfp.util.SeedStream(1, salt='EndtoEndTest') + strm = tfp_test_util.test_seed_stream() predictors = tf.cast([ 201., 244., 47., 287., 203., 58., 210., 202., 198., 158., 165., 201., 157., 131., 166., 160., 186., 125., 218., 146.
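The `testCorrelated2dNormalwithinMCError` case added above encodes a standard sanity check: an unbiased sampler's estimate of a known functional should land within a few Monte Carlo standard errors of the truth, with MCSE estimated as (sample standard deviation) / sqrt(effective sample size). A rough NumPy rendering of the same arithmetic, using i.i.d. draws in place of chain output so that ESS is simply the draw count (all numbers below are illustrative, not taken from the patch):

```python
import numpy as np

rng = np.random.default_rng(0)
# Stand-in for sampler output: 500 "draws" from 10 "chains" of a known
# N(3, 2**2) target; real chain output would need
# tfp.mcmc.effective_sample_size instead of the raw draw count.
draws = rng.normal(loc=3., scale=2., size=(500, 10))

estimate = draws.mean()
ess = draws.size                                 # exact only because draws are i.i.d.
mcse = draws.std(axis=0).mean() / np.sqrt(ess)   # avg per-chain std / sqrt(ESS)
scaled_error = abs(estimate - 3.) / mcse         # should be O(1) for an unbiased sampler

# The test phrases the same check probabilistically, asserting
# Normal(0, 1).prob(scaled_error) > 0.01; here a generous ~4-sigma
# tolerance plays that role.
assert scaled_error < 4., scaled_error
```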
@@ -370,7 +419,7 @@ def testSampleEndtoEnd(self): number_of_steps, burnin, nchain = 100, 50, 50 @tf.function(autograph=False) - def run_chain(): + def run_chain_and_get_diagnostic(): # random initialization of the starting position of each chain b0, b1, df, _ = robust_lm.sample(nchain, seed=strm()) @@ -418,7 +467,7 @@ def trace_fn(_, pkr): # Sample from posterior distribution and get diagnostic [ final_step_size, average_accept_ratio, average_rhat - ] = self.evaluate(run_chain()) + ] = self.evaluate(run_chain_and_get_diagnostic()) # Check that step size adaptation reduced the initial step size self.assertAllLess( diff --git a/tensorflow_probability/python/mcmc/random_walk_metropolis.py b/tensorflow_probability/python/mcmc/random_walk_metropolis.py index ccdb4844e4..5e38271997 100644 --- a/tensorflow_probability/python/mcmc/random_walk_metropolis.py +++ b/tensorflow_probability/python/mcmc/random_walk_metropolis.py @@ -21,7 +21,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc import kernel as kernel_base from tensorflow_probability.python.mcmc import metropolis_hastings @@ -87,7 +88,7 @@ def _fn(state_parts, seed): Raises: ValueError: if `scale` does not broadcast with `state_parts`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'random_walk_normal_fn', values=[state_parts, scale, seed]): scales = scale if mcmc_util.is_list_like(scale) else [scale] if len(scales) == 1: @@ -149,7 +150,7 @@ def _fn(state_parts, seed): Raises: ValueError: if `scale` does not broadcast with `state_parts`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]): scales = scale if mcmc_util.is_list_like(scale) else [scale] if len(scales) == 1: @@ -501,12 +502,12 @@ def is_calibrated(self): @mcmc_util.set_doc(RandomWalkMetropolis.one_step.__doc__) def one_step(self, current_state, previous_kernel_results): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'rwm', 'one_step'), values=[ self.seed, current_state, previous_kernel_results.target_log_prob ]): - with tf.compat.v1.name_scope('initialize'): + with tf1.name_scope('initialize'): if mcmc_util.is_list_like(current_state): current_state_parts = list(current_state) else: @@ -536,7 +537,7 @@ def maybe_flatten(x): @mcmc_util.set_doc(RandomWalkMetropolis.bootstrap_results.__doc__) def bootstrap_results(self, init_state): - with tf.compat.v1.name_scope(self.name, 'rwm_bootstrap_results', + with tf1.name_scope(self.name, 'rwm_bootstrap_results', [init_state]): if not mcmc_util.is_list_like(init_state): init_state = [init_state] diff --git a/tensorflow_probability/python/mcmc/random_walk_metropolis_test.py b/tensorflow_probability/python/mcmc/random_walk_metropolis_test.py index 7d3ed62776..2fad2c7305 100644 --- a/tensorflow_probability/python/mcmc/random_walk_metropolis_test.py +++ b/tensorflow_probability/python/mcmc/random_walk_metropolis_test.py @@ -19,16 +19,15 @@ from __future__ import print_function # Dependency imports -import numpy as np - -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - def
_reduce_variance(x, axis=None, keepdims=False): sample_mean = tf.math.reduce_mean(input_tensor=x, axis=axis, keepdims=True) @@ -39,7 +38,7 @@ def _reduce_variance(x, axis=None, keepdims=False): @test_util.run_all_in_graph_and_eager_modes -class RWMTest(tf.test.TestCase): +class RWMTest(test_case.TestCase): def testRWM1DUniform(self): """Sampling from the Standard Normal Distribution.""" diff --git a/tensorflow_probability/python/mcmc/replica_exchange_mc.py b/tensorflow_probability/python/mcmc/replica_exchange_mc.py index cc3073a409..f76db30983 100644 --- a/tensorflow_probability/python/mcmc/replica_exchange_mc.py +++ b/tensorflow_probability/python/mcmc/replica_exchange_mc.py @@ -20,7 +20,8 @@ import collections -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc import kernel as kernel_base from tensorflow_probability.python.mcmc.internal import util as mcmc_util @@ -333,7 +334,7 @@ def one_step(self, current_state, previous_kernel_results): # Key difficulty: The type of exchanges differs from one call to the # next...even the number of exchanges can differ. # As a result, exchanges must happen dynamically, in while loops. - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'remc', 'one_step'), values=[current_state, previous_kernel_results]): @@ -381,7 +382,7 @@ def one_step(self, current_state, previous_kernel_results): old_states, exchange_proposed, exchange_proposed_n, sampled_replica_states, sampled_replica_results) - no_exchange_proposed, _ = tf.compat.v1.setdiff1d( + no_exchange_proposed, _ = tf1.setdiff1d( tf.range(self.num_replica), tf.reshape(exchange_proposed, [-1])) exchanged_states = self._insert_old_states_where_no_exchange_was_proposed( @@ -420,7 +421,7 @@ def _get_exchanged_states(self, old_states, exchange_proposed, exchange_proposed_n, sampled_replica_states, sampled_replica_results): """Get list of TensorArrays holding exchanged states, and zeros.""" - with tf.compat.v1.name_scope('get_exchanged_states'): + with tf1.name_scope('get_exchanged_states'): target_log_probs = [] for replica in range(self.num_replica): @@ -459,7 +460,7 @@ def _get_exchanged_states(self, old_states, exchange_proposed, def _swap(is_exchange_accepted, x, y): """Swap batches of x, y where accepted.""" - with tf.compat.v1.name_scope('swap_where_exchange_accepted'): + with tf1.name_scope('swap_where_exchange_accepted'): new_x = mcmc_util.choose(is_exchange_accepted, y, x) new_y = mcmc_util.choose(is_exchange_accepted, x, y) return new_x, new_y @@ -499,7 +500,7 @@ def body(i, exchanged_states): def _insert_old_states_where_no_exchange_was_proposed( self, no_exchange_proposed, old_states, exchanged_states): - with tf.compat.v1.name_scope( + with tf1.name_scope( 'insert_old_states_where_no_exchange_was_proposed'): def cond(j, unused_exchanged_states): @@ -528,7 +529,7 @@ def bootstrap_results(self, init_state): `Tensor`s representing internal calculations made within this function. This includes replica states.
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'), values=[init_state]): replica_results = [ diff --git a/tensorflow_probability/python/mcmc/replica_exchange_mc_test.py b/tensorflow_probability/python/mcmc/replica_exchange_mc_test.py index 15ca13b86f..50382cecfd 100644 --- a/tensorflow_probability/python/mcmc/replica_exchange_mc_test.py +++ b/tensorflow_probability/python/mcmc/replica_exchange_mc_test.py @@ -19,15 +19,16 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - def _set_seed(seed): """Helper which uses graph seed if using TFE.""" @@ -38,10 +39,10 @@ def _set_seed(seed): @test_util.run_all_in_graph_and_eager_modes -class DefaultExchangeProposedFnTest(tf.test.TestCase): +class DefaultExchangeProposedFnTest(test_case.TestCase): def setUp(self): - tf.compat.v1.set_random_seed(123) + tf1.set_random_seed(123) def generate_exchanges(self, exchange_proposed_fn, num_replica, seed): @@ -49,11 +50,11 @@ def _scan_fn(*_): exchange = exchange_proposed_fn(num_replica, seed) flat_replicas = tf.reshape(exchange, [-1]) with tf.control_dependencies([ - tf.compat.v1.assert_equal( + tf1.assert_equal( tf.size(input=flat_replicas), tf.size(input=tf.unique(flat_replicas)[0])), - tf.compat.v1.assert_greater_equal(flat_replicas, 0), - tf.compat.v1.assert_less(flat_replicas, num_replica), + tf1.assert_greater_equal(flat_replicas, 0), + tf1.assert_less(flat_replicas, num_replica), ]): return tf.shape(input=exchange)[0] @@ -155,10 +156,10 @@ def testProbExchange0p0(self): @test_util.run_all_in_graph_and_eager_modes -class REMCTest(tf.test.TestCase): +class REMCTest(test_case.TestCase): def setUp(self): - tf.compat.v1.set_random_seed(123) + tf1.set_random_seed(123) def _getNormalREMCSamples(self, inverse_temperatures, @@ -306,7 +307,7 @@ def _trace_log_accept_ratio(state, results): axis=0)) [sample_mean_, sample_std_, log_accept_ratios_] = self.evaluate( [sample_mean, sample_std, log_accept_ratios]) - tf.compat.v1.logging.vlog(1, 'log_accept_ratios: %s eager: %s', + tf1.logging.vlog(1, 'log_accept_ratios: %s eager: %s', log_accept_ratios_, tf.executing_eagerly()) self.assertAllClose(sample_mean_, [0., 0.], atol=0.3, rtol=0.3) diff --git a/tensorflow_probability/python/mcmc/sample.py b/tensorflow_probability/python/mcmc/sample.py index a539fcb3c6..f1c412bb26 100644 --- a/tensorflow_probability/python/mcmc/sample.py +++ b/tensorflow_probability/python/mcmc/sample.py @@ -25,7 +25,8 @@ import warnings # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.internal import util as mcmc_util @@ -308,7 +309,7 @@ def trace_everything(states, previous_kernel_results): if not kernel.is_calibrated: warnings.warn("supplied `TransitionKernel` is not calibrated. 
Markov " "chain may not converge to intended target distribution.") - with tf.compat.v1.name_scope( + with tf1.name_scope( name, "mcmc_sample_chain", [num_results, num_burnin_steps, num_steps_between_results]): num_results = tf.convert_to_tensor( diff --git a/tensorflow_probability/python/mcmc/sample_annealed_importance.py b/tensorflow_probability/python/mcmc/sample_annealed_importance.py index 885805541a..ff07f6940e 100644 --- a/tensorflow_probability/python/mcmc/sample_annealed_importance.py +++ b/tensorflow_probability/python/mcmc/sample_annealed_importance.py @@ -22,7 +22,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.internal import util as mcmc_util @@ -189,7 +190,7 @@ def target_log_prob_fn(weights): ``` """ - with tf.compat.v1.name_scope(name, "sample_annealed_importance_chain", + with tf1.name_scope(name, "sample_annealed_importance_chain", [num_steps, current_state]): num_steps = tf.convert_to_tensor( value=num_steps, dtype=tf.int32, name="num_steps") diff --git a/tensorflow_probability/python/mcmc/sample_annealed_importance_test.py b/tensorflow_probability/python/mcmc/sample_annealed_importance_test.py index e3d13dc98b..3a59242d1f 100644 --- a/tensorflow_probability/python/mcmc/sample_annealed_importance_test.py +++ b/tensorflow_probability/python/mcmc/sample_annealed_importance_test.py @@ -19,14 +19,17 @@ from __future__ import print_function import collections + # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import def _compute_sample_variance(x, axis=None, keepdims=False): @@ -37,18 +40,18 @@ def _compute_sample_variance(x, axis=None, keepdims=False): keepdims=keepdims) -_maybe_seed = lambda s: tf.compat.v1.set_random_seed(s) if tf.executing_eagerly( +_maybe_seed = lambda s: tf1.set_random_seed(s) if tf.executing_eagerly( ) else s @test_util.run_all_in_graph_and_eager_modes -class SampleAnnealedImportanceTest(tf.test.TestCase): +class SampleAnnealedImportanceTest(test_case.TestCase): def setUp(self): self._shape_param = 5. self._rate_param = 10. 
- tf.compat.v1.random.set_random_seed(10003) + tf1.random.set_random_seed(10003) np.random.seed(10003) def _log_gamma_log_prob(self, x, event_dims=()): @@ -140,7 +143,7 @@ def make_kernel(tlp_fn): event_size, ]) - tf.compat.v1.logging.vlog( + tf1.logging.vlog( 1, ' log_true_normalizer: {}\n' ' log_estimated_normalizer: {}\n' ' ais_weights_size: {}\n' @@ -152,7 +155,7 @@ def make_kernel(tlp_fn): def _ais_gets_correct_log_normalizer_wrapper(self, independent_chain_ndims): """Tests that AIS yields reasonable estimates of normalizers.""" initial_draws = np.random.normal(size=[30, 2, 1]) - x_ph = tf.compat.v1.placeholder_with_default( + x_ph = tf1.placeholder_with_default( np.float32(initial_draws), shape=initial_draws.shape, name='x_ph') self._ais_gets_correct_log_normalizer(x_ph, independent_chain_ndims) diff --git a/tensorflow_probability/python/mcmc/sample_halton_sequence.py b/tensorflow_probability/python/mcmc/sample_halton_sequence.py index 793b0d3f4f..5da617f6d2 100644 --- a/tensorflow_probability/python/mcmc/sample_halton_sequence.py +++ b/tensorflow_probability/python/mcmc/sample_halton_sequence.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.util.seed_stream import SeedStream @@ -178,7 +179,7 @@ def sample_halton_sequence(dim, if not dtype.is_floating: raise ValueError('dtype must be of `float`-type') - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'sample', values=[num_results, sequence_indices]): # Here and in the following, the shape layout is as follows: # [sample dimension, event dimension, coefficient dimension]. @@ -211,7 +212,7 @@ def sample_halton_sequence(dim, # The mask is true for those coefficients that are irrelevant. weight_mask = exponents_by_axes >= max_sizes_by_axes - capped_exponents = tf.compat.v1.where(weight_mask, + capped_exponents = tf1.where(weight_mask, tf.zeros_like(exponents_by_axes), exponents_by_axes) weights = radixes ** capped_exponents @@ -326,7 +327,7 @@ def _get_indices(num_results, sequence_indices, dtype, name=None): Returns: indices: `Tensor` of dtype `dtype` and shape = `[n, 1, 1]`. """ - with tf.compat.v1.name_scope(name, '_get_indices', + with tf1.name_scope(name, '_get_indices', [num_results, sequence_indices]): if sequence_indices is None: num_results = tf.cast(num_results, dtype=dtype) diff --git a/tensorflow_probability/python/mcmc/sample_halton_sequence_test.py b/tensorflow_probability/python/mcmc/sample_halton_sequence_test.py index 2dfdb493d1..0d4f704dd8 100644 --- a/tensorflow_probability/python/mcmc/sample_halton_sequence_test.py +++ b/tensorflow_probability/python/mcmc/sample_halton_sequence_test.py @@ -19,28 +19,29 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.internal import monte_carlo +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - def _set_seed(seed): """Helper which uses graph seed if using TFE.""" # TODO(b/68017812): Deprecate once TFE supports seed. 
if tf.executing_eagerly(): - tf.compat.v1.set_random_seed(seed) + tf1.set_random_seed(seed) return None return seed @test_util.run_all_in_graph_and_eager_modes -class HaltonSequenceTest(tf.test.TestCase): +class HaltonSequenceTest(test_case.TestCase): def test_known_values_small_bases(self): # The first five elements of the non-randomized Halton sequence diff --git a/tensorflow_probability/python/mcmc/sample_test.py b/tensorflow_probability/python/mcmc/sample_test.py index 64e35921f9..02d51389b0 100644 --- a/tensorflow_probability/python/mcmc/sample_test.py +++ b/tensorflow_probability/python/mcmc/sample_test.py @@ -24,13 +24,13 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import - TestTransitionKernelResults = collections.namedtuple( 'TestTransitionKernelResults', 'counter_1, counter_2') @@ -50,13 +50,13 @@ def is_calibrated(self): return True -class SampleChainTest(tf.test.TestCase): +class SampleChainTest(test_case.TestCase): def setUp(self): self._shape_param = 5. self._rate_param = 10. - tf.compat.v1.random.set_random_seed(10003) + tf1.random.set_random_seed(10003) np.random.seed(10003) def testChainWorksCorrelatedMultivariate(self): @@ -79,7 +79,7 @@ def target_log_prob(x, y): return -0.5 * tf.reduce_sum(input_tensor=z**2., axis=-1) if tf.executing_eagerly(): - tf.compat.v1.set_random_seed(54) + tf1.set_random_seed(54) states, _ = tfp.mcmc.sample_chain( num_results=num_results, current_state=[dtype(-2), dtype(2)], diff --git a/tensorflow_probability/python/mcmc/simple_step_size_adaptation.py b/tensorflow_probability/python/mcmc/simple_step_size_adaptation.py index f9d3799f75..bd2f654963 100644 --- a/tensorflow_probability/python/mcmc/simple_step_size_adaptation.py +++ b/tensorflow_probability/python/mcmc/simple_step_size_adaptation.py @@ -21,7 +21,8 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.mcmc import kernel as kernel_base @@ -62,7 +63,7 @@ def _get_differing_dims(a, b): b_shape = np.array(b.shape.as_list()) return np.where(a_shape != b_shape[:len(a_shape)])[0] else: - return tf.compat.v1.where( + return tf1.where( tf.not_equal(tf.shape(input=a), tf.shape(input=b)[:tf.rank(a)]))[:, 0] @@ -254,7 +255,7 @@ def __init__(self, inner_kernel = mcmc_util.enable_store_parameters_in_results(inner_kernel) - with tf.compat.v1.name_scope( + with tf1.name_scope( mcmc_util.make_name(name, 'simple_step_size_adaptation', '__init__'), values=[target_accept_prob, adaptation_rate, num_adaptation_steps]) as name: @@ -311,7 +312,7 @@ def parameters(self): return self._parameters def one_step(self, current_state, previous_kernel_results): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'simple_step_size_adaptation', 'one_step'), values=[current_state, previous_kernel_results]): @@ -391,7 +392,7 @@ def one_step(self, current_state, previous_kernel_results): step_size_part / (1. 
+ previous_kernel_results.adaptation_rate)) new_step_size_parts.append( - tf.compat.v1.where( + tf1.where( previous_kernel_results.step < self.num_adaptation_steps, new_step_size_part, step_size_part)) new_step_size = tf.nest.pack_sequence_as(step_size, new_step_size_parts) @@ -402,7 +403,7 @@ def one_step(self, current_state, previous_kernel_results): new_step_size=new_step_size) def bootstrap_results(self, init_state): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'simple_step_size_adaptation', 'bootstrap_results'), values=[init_state]): @@ -426,9 +427,9 @@ def _maybe_validate_target_accept_prob(target_accept_prob, validate_args): if not validate_args: return target_accept_prob with tf.control_dependencies([ - tf.compat.v1.assert_positive( + tf1.assert_positive( target_accept_prob, message='`target_accept_prob` must be > 0.'), - tf.compat.v1.assert_less( + tf1.assert_less( target_accept_prob, tf.ones_like(target_accept_prob), message='`target_accept_prob` must be < 1.') diff --git a/tensorflow_probability/python/mcmc/simple_step_size_adaptation_test.py b/tensorflow_probability/python/mcmc/simple_step_size_adaptation_test.py index b1cc0dc0db..2651b037c0 100644 --- a/tensorflow_probability/python/mcmc/simple_step_size_adaptation_test.py +++ b/tensorflow_probability/python/mcmc/simple_step_size_adaptation_test.py @@ -24,14 +24,15 @@ from absl.testing import parameterized import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions _RATE = 1.01 @@ -129,7 +130,7 @@ def is_calibrated(self): @test_util.run_all_in_graph_and_eager_modes -class SimpleStepSizeAdaptationTest(tf.test.TestCase, parameterized.TestCase): +class SimpleStepSizeAdaptationTest(test_case.TestCase, parameterized.TestCase): def testTurnOnStoreParametersInKernelResults(self): kernel = FakeWrapperKernel(FakeSteppedKernel(step_size=0.5)) @@ -350,7 +351,7 @@ def _impl(): _impl() def testExample(self): - tf.compat.v1.random.set_random_seed(tfp_test_util.test_seed()) + tf1.random.set_random_seed(tfp_test_util.test_seed()) target_log_prob_fn = tfd.Normal(loc=0., scale=1.).log_prob num_burnin_steps = 500 num_results = 500 @@ -379,7 +380,7 @@ def testExample(self): @test_util.run_all_in_graph_and_eager_modes -class SimpleStepSizeAdaptationStaticBroadcastingTest(tf.test.TestCase, +class SimpleStepSizeAdaptationStaticBroadcastingTest(test_case.TestCase, parameterized.TestCase): use_static_shape = True @@ -420,7 +421,7 @@ def testBroadcasting(self, old_step_size, new_step_size): [[np.log(0.73), np.log(0.76), np.log(0.73)], [np.log(0.77), np.log(0.77), np.log(0.73)]], dtype=tf.float64) - log_accept_ratio = tf.compat.v1.placeholder_with_default( + log_accept_ratio = tf1.placeholder_with_default( input=log_accept_ratio, shape=log_accept_ratio.shape if self.use_static_shape else None) state = [ diff --git a/tensorflow_probability/python/mcmc/slice_sampler_kernel.py b/tensorflow_probability/python/mcmc/slice_sampler_kernel.py index 93046bc7a1..69b16d032b 100644 --- a/tensorflow_probability/python/mcmc/slice_sampler_kernel.py +++ b/tensorflow_probability/python/mcmc/slice_sampler_kernel.py 
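Reviewer note: the hunks above gate every step-size update on `step < self.num_adaptation_steps` via `tf1.where`. A simplified sketch of the rule for a single scalar step size (the function name is ours; the real `one_step` additionally handles lists of state parts, batch reductions, and dtype casting):

```python
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf


def adapt_step_size(step_size, log_accept_ratio, target_accept_prob,
                    adaptation_rate, step, num_adaptation_steps):
  accept_prob = tf.exp(tf.minimum(log_accept_ratio, 0.))
  grown = step_size * (1. + adaptation_rate)   # Accepting too often: grow.
  shrunk = step_size / (1. + adaptation_rate)  # Accepting too rarely: shrink.
  proposed = tf1.where(accept_prob > target_accept_prob, grown, shrunk)
  # Freeze the step size once the adaptation window is over.
  return tf1.where(step < num_adaptation_steps, proposed, step_size)
```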
@@ -21,7 +21,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.mcmc import kernel as kernel_base @@ -278,13 +279,13 @@ def one_step(self, current_state, previous_kernel_results): `current_state`. TypeError: if `not target_log_prob.dtype.is_floating`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'slice', 'one_step'), values=[ self.step_size, self.max_doublings, self._seed_stream, current_state, previous_kernel_results.target_log_prob ]): - with tf.compat.v1.name_scope('initialize'): + with tf1.name_scope('initialize'): [ current_state_parts, step_sizes, @@ -334,7 +335,7 @@ def maybe_flatten(x): ] def bootstrap_results(self, init_state): - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'slice', 'bootstrap_results'), values=[init_state]): if not mcmc_util.is_list_like(init_state): @@ -428,7 +429,7 @@ def _sample_next(target_log_prob_fn, lower_bounds: `Tensor` of batch shape and the dtype of the input state. The lower bounds of the slices along the sampling direction. """ - with tf.compat.v1.name_scope(name, 'sample_next', [ + with tf1.name_scope(name, 'sample_next', [ current_state_parts, step_sizes, max_doublings, current_target_log_prob, batch_rank ]): diff --git a/tensorflow_probability/python/mcmc/slice_sampler_test.py b/tensorflow_probability/python/mcmc/slice_sampler_test.py index 38f83e65ac..d587b2c475 100644 --- a/tensorflow_probability/python/mcmc/slice_sampler_test.py +++ b/tensorflow_probability/python/mcmc/slice_sampler_test.py @@ -19,15 +19,17 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class SliceSamplerTest(tf.test.TestCase): +class SliceSamplerTest(test_case.TestCase): def testOneDimNormal(self): """Sampling from the Standard Normal Distribution.""" @@ -128,8 +130,8 @@ def target_log_prob(x, y): init_state = [np.ones([num_chains, 1], dtype=dtype), np.ones([num_chains, 1], dtype=dtype)] placeholder_init_state = [ - tf.compat.v1.placeholder_with_default(init_state[0], shape=[None, 1]), - tf.compat.v1.placeholder_with_default(init_state[1], shape=[None, 1]) + tf1.placeholder_with_default(init_state[0], shape=[None, 1]), + tf1.placeholder_with_default(init_state[1], shape=[None, 1]) ] # Run Slice Sampler for `num_results` iterations for `num_chains` # independent chains: @@ -179,8 +181,8 @@ def target_log_prob(x, y): init_state = [np.ones([num_chains, 1], dtype=dtype), np.ones([num_chains, 1], dtype=dtype)] placeholder_init_state = [ - tf.compat.v1.placeholder_with_default(init_state[0], shape=None), - tf.compat.v1.placeholder_with_default(init_state[1], shape=None) + tf1.placeholder_with_default(init_state[0], shape=None), + tf1.placeholder_with_default(init_state[1], shape=None) ] # Run Slice Sampler for `num_results` iterations for `num_chains` # independent chains: diff
--git a/tensorflow_probability/python/mcmc/text_messages_hmc.py b/tensorflow_probability/python/mcmc/text_messages_hmc.py index ac9c98da07..b1226e7f17 100644 --- a/tensorflow_probability/python/mcmc/text_messages_hmc.py +++ b/tensorflow_probability/python/mcmc/text_messages_hmc.py @@ -21,7 +21,8 @@ import time # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp tfd = tfp.distributions @@ -68,7 +69,7 @@ def benchmark_text_messages_hmc( """Runs HMC on the text-messages unnormalized posterior.""" if not tf.executing_eagerly(): - tf.compat.v1.reset_default_graph() + tf1.reset_default_graph() # Build a static, pretend dataset. count_data = tf.cast( @@ -80,7 +81,7 @@ def benchmark_text_messages_hmc( if tf.executing_eagerly(): count_data = count_data.numpy() else: - with tf.compat.v1.Session(): + with tf1.Session(): count_data = count_data.eval() # Define a closure over our joint_log_prob. @@ -93,7 +94,7 @@ def unnormalized_log_posterior(lambda1, lambda2, tau): sample_chain = tfp.mcmc.sample_chain # Initialize the step_size. (It will be automatically adapted.) - step_size = tf.compat.v2.Variable( + step_size = tf.Variable( name='step_size', initial_value=tf.constant(0.05, dtype=tf.float32), trainable=False) @@ -133,8 +134,8 @@ def computation(): # trial. is_accepted_tensor = computation() if not tf.executing_eagerly(): - session = tf.compat.v1.Session() - session.run(tf.compat.v1.global_variables_initializer()) + session = tf1.Session() + session.run(tf1.global_variables_initializer()) session.run(is_accepted_tensor) start_time = time.time() diff --git a/tensorflow_probability/python/mcmc/text_messages_hmc_eager_test.py b/tensorflow_probability/python/mcmc/text_messages_hmc_eager_test.py index b6bb4bf082..145c6ce782 100644 --- a/tensorflow_probability/python/mcmc/text_messages_hmc_eager_test.py +++ b/tensorflow_probability/python/mcmc/text_messages_hmc_eager_test.py @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.text_messages_hmc import TextMessagesHmcBenchmarkTestHarness -tf.compat.v1.enable_eager_execution() +tf1.enable_eager_execution() class EagerTextMessagesHmcBenchmark( diff --git a/tensorflow_probability/python/mcmc/text_messages_hmc_graph_test.py b/tensorflow_probability/python/mcmc/text_messages_hmc_graph_test.py index 864737a0b0..508d980ed0 100644 --- a/tensorflow_probability/python/mcmc/text_messages_hmc_graph_test.py +++ b/tensorflow_probability/python/mcmc/text_messages_hmc_graph_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -import tensorflow as tf +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc.text_messages_hmc import TextMessagesHmcBenchmarkTestHarness diff --git a/tensorflow_probability/python/mcmc/transformed_kernel.py b/tensorflow_probability/python/mcmc/transformed_kernel.py index 50d04ab8cc..06b696224e 100644 --- a/tensorflow_probability/python/mcmc/transformed_kernel.py +++ b/tensorflow_probability/python/mcmc/transformed_kernel.py @@ -20,7 +20,8 @@ import collections -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.mcmc import kernel as kernel_base from tensorflow_probability.python.mcmc.internal import util as mcmc_util @@ -253,7 +254,7 @@ def one_step(self, current_state, previous_kernel_results): kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'transformed_kernel', 'one_step'), values=[previous_kernel_results]): transformed_next_state, kernel_results = self._inner_kernel.one_step( @@ -320,7 +321,7 @@ def bootstrap_results(self, init_state=None, transformed_init_state=None): if (init_state is None) == (transformed_init_state is None): raise ValueError('Must specify exactly one of `init_state` ' 'or `transformed_init_state`.') - with tf.compat.v1.name_scope( + with tf1.name_scope( name=mcmc_util.make_name(self.name, 'transformed_kernel', 'bootstrap_results'), values=[init_state, transformed_init_state]): diff --git a/tensorflow_probability/python/mcmc/transformed_kernel_test.py b/tensorflow_probability/python/mcmc/transformed_kernel_test.py index 794fe5b636..da82946138 100644 --- a/tensorflow_probability/python/mcmc/transformed_kernel_test.py +++ b/tensorflow_probability/python/mcmc/transformed_kernel_test.py @@ -19,26 +19,27 @@ from __future__ import print_function import collections + # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions -tfb = tfp.bijectors - - FakeInnerKernelResults = collections.namedtuple( 'FakeInnerKernelResults', ['target_log_prob']) def _maybe_seed(seed): if tf.executing_eagerly(): - tf.compat.v1.set_random_seed(seed) + tf1.set_random_seed(seed) return None return seed @@ -66,7 +67,7 @@ def bootstrap_results(self, init_state): @test_util.run_all_in_graph_and_eager_modes -class TransformedTransitionKernelTest(tf.test.TestCase): +class TransformedTransitionKernelTest(test_case.TestCase): def setUp(self): super(TransformedTransitionKernelTest, self).setUp() diff --git a/tensorflow_probability/python/monte_carlo/BUILD b/tensorflow_probability/python/monte_carlo/BUILD index be2ac0a507..e7110a1d30 100644 --- a/tensorflow_probability/python/monte_carlo/BUILD +++ b/tensorflow_probability/python/monte_carlo/BUILD @@ -54,5 +54,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git 
a/tensorflow_probability/python/monte_carlo/expectation.py b/tensorflow_probability/python/monte_carlo/expectation.py index 0cb56b3283..702b46e191 100644 --- a/tensorflow_probability/python/monte_carlo/expectation.py +++ b/tensorflow_probability/python/monte_carlo/expectation.py @@ -18,7 +18,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf __all__ = [ @@ -155,7 +156,7 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True, `callable`. """ - with tf.compat.v1.name_scope(name, 'expectation', [samples]): + with tf1.name_scope(name, 'expectation', [samples]): if not callable(f): raise ValueError('`f` must be a callable function.') if use_reparametrization: @@ -205,7 +206,7 @@ def _sample_max(values): def _get_samples(dist, z, n, seed): """Check args and return samples.""" - with tf.compat.v1.name_scope('get_samples', values=[z, n]): + with tf1.name_scope('get_samples', values=[z, n]): if (n is None) == (z is None): raise ValueError( 'Must specify exactly one of arguments "n" and "z". Found: ' diff --git a/tensorflow_probability/python/monte_carlo/expectation_test.py b/tensorflow_probability/python/monte_carlo/expectation_test.py index eb6fa03916..75aba39aa5 100644 --- a/tensorflow_probability/python/monte_carlo/expectation_test.py +++ b/tensorflow_probability/python/monte_carlo/expectation_test.py @@ -19,17 +19,16 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.monte_carlo.expectation import _get_samples -tfd = tfp.distributions - -class GetSamplesTest(tf.test.TestCase): +class GetSamplesTest(test_case.TestCase): """Test the private method 'get_samples'.""" def test_raises_if_both_z_and_n_are_none(self): @@ -65,7 +64,7 @@ def test_returns_z_if_z_provided(self): self.assertEqual((10,), z.shape) -class ExpectationTest(tf.test.TestCase): +class ExpectationTest(test_case.TestCase): def test_works_correctly(self): x = tf.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6]) diff --git a/tensorflow_probability/python/optimizer/BUILD b/tensorflow_probability/python/optimizer/BUILD index a235d29829..9d891f71b8 100644 --- a/tensorflow_probability/python/optimizer/BUILD +++ b/tensorflow_probability/python/optimizer/BUILD @@ -60,6 +60,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -79,6 +80,7 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -102,6 +104,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -126,6 +129,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -157,6 +161,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -178,6 +183,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -200,5 +206,6 @@ py_test( # numpy dep, # tensorflow dep, 
"//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/optimizer/bfgs.py b/tensorflow_probability/python/optimizer/bfgs.py index 7ef48591c3..532e3bd00f 100644 --- a/tensorflow_probability/python/optimizer/bfgs.py +++ b/tensorflow_probability/python/optimizer/bfgs.py @@ -30,7 +30,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import prefer_static @@ -190,7 +191,7 @@ def quadratic(x): inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the inverse of the estimated Hessian. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'minimize', [initial_position, tolerance, initial_inverse_hessian_estimate]): initial_position = tf.convert_to_tensor( @@ -255,10 +256,10 @@ def _body(state): search_direction_reset = _get_search_direction( initial_inv_hessian, state.objective_gradient) - actual_serch_direction = tf.compat.v1.where(needs_reset, + actual_serch_direction = tf1.where(needs_reset, search_direction_reset, search_direction) - actual_inv_hessian = tf.compat.v1.where(needs_reset, initial_inv_hessian, + actual_inv_hessian = tf1.where(needs_reset, initial_inv_hessian, state.inverse_hessian_estimate) # Replace the hessian estimate in the state, in case it had to be reset. @@ -343,7 +344,7 @@ def _do_update_inv_hessian(): prev_state.inverse_hessian_estimate) return bfgs_utils.update_fields( next_state, - inverse_hessian_estimate=tf.compat.v1.where( + inverse_hessian_estimate=tf1.where( should_update, next_inv_hessian, prev_state.inverse_hessian_estimate)) diff --git a/tensorflow_probability/python/optimizer/bfgs_test.py b/tensorflow_probability/python/optimizer/bfgs_test.py index 00dca03536..1521a00395 100644 --- a/tensorflow_probability/python/optimizer/bfgs_test.py +++ b/tensorflow_probability/python/optimizer/bfgs_test.py @@ -23,8 +23,11 @@ from scipy.stats import special_ortho_group -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -40,7 +43,7 @@ def _norm(x): @test_util.run_all_in_graph_and_eager_modes -class BfgsTest(tf.test.TestCase): +class BfgsTest(test_case.TestCase): """Tests for BFGS optimization algorithm.""" def test_quadratic_bowl_2d(self): @@ -413,7 +416,7 @@ def quadratic(x): # Test with a vector of unknown dimension, and a fully unknown shape. 
for shape in ([None], None): - start = tf.compat.v1.placeholder(tf.float32, shape=shape) + start = tf1.placeholder(tf.float32, shape=shape) bfgs_op = tfp.optimizer.bfgs_minimize( quadratic, initial_position=start, tolerance=1e-8) self.assertFalse(bfgs_op.position.shape.is_fully_defined()) diff --git a/tensorflow_probability/python/optimizer/bfgs_utils.py b/tensorflow_probability/python/optimizer/bfgs_utils.py index 617afdae45..9a0ade1361 100644 --- a/tensorflow_probability/python/optimizer/bfgs_utils.py +++ b/tensorflow_probability/python/optimizer/bfgs_utils.py @@ -20,7 +20,8 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import prefer_static @@ -163,7 +164,7 @@ def _do_update_position(): # For inactive batch members `left.x` is zero. However, their # `search_direction` might also be undefined, so we can't rely on # multiplication by zero to produce a `position_delta` of zero. - position_delta = tf.compat.v1.where( + position_delta = tf1.where( inactive, tf.zeros_like(search_direction), search_direction * tf.expand_dims(ls_result.left.x, axis=-1)) return _update_position( diff --git a/tensorflow_probability/python/optimizer/differential_evolution.py b/tensorflow_probability/python/optimizer/differential_evolution.py index fa94f66835..f401bcdf5e 100644 --- a/tensorflow_probability/python/optimizer/differential_evolution.py +++ b/tensorflow_probability/python/optimizer/differential_evolution.py @@ -49,7 +49,8 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions @@ -172,7 +173,7 @@ def one_step( next_population_values: A `Tensor` of same shape and dtype as input `population_values`. The function values for the `next_population`. 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'one_step', [population, population_values, differential_weight, crossover_prob]): population, _ = _ensure_list(population) @@ -199,15 +200,15 @@ def one_step( infinity = tf.zeros_like(population_values) + np.inf - population_values = tf.compat.v1.where( + population_values = tf1.where( tf.math.is_nan(population_values), x=infinity, y=population_values) to_replace = candidate_values < population_values next_population = [ - tf.compat.v1.where(to_replace, x=candidates_part, y=population_part) + tf1.where(to_replace, x=candidates_part, y=population_part) for candidates_part, population_part in zip(candidates, population) ] - next_values = tf.compat.v1.where( + next_values = tf1.where( to_replace, x=candidate_values, y=population_values) return next_population, next_values @@ -370,7 +371,7 @@ def easom_fn(x, y): raise ValueError('Only one of initial population or initial position ' 'should be specified') - with tf.compat.v1.name_scope( + with tf1.name_scope( name, default_name='minimize', values=[ @@ -512,7 +513,7 @@ def _check_failure(population_values): def _find_best_in_population(population, values): """Finds the population member with the lowest value.""" best_value = tf.math.reduce_min(input_tensor=values) - best_index = tf.compat.v1.where(tf.math.equal(values, best_value))[0, 0] + best_index = tf1.where(tf.math.equal(values, best_value))[0, 0] return ([population_part[best_index] for population_part in population], best_value) @@ -664,7 +665,7 @@ def _binary_crossover(population, dtype=crossover_prob.dtype.base_dtype, seed=seed_stream()) < crossover_prob do_binary_crossover |= force_crossovers - recombinant_flat = tf.compat.v1.where( + recombinant_flat = tf1.where( do_binary_crossover, x=mutant_part_flat, y=pop_part_flat) recombinant = tf.reshape(recombinant_flat, tf.shape(input=population_part)) recombinants.append(recombinant) @@ -734,7 +735,7 @@ def _get_mixing_indices(size, seed=None, name=None): samples without replacement between 0 and size - 1 (inclusive) with the `i`th row not including the number `i`. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, default_name='get_mixing_indices', values=[size]): size = tf.convert_to_tensor(value=size) dtype = size.dtype @@ -753,18 +754,18 @@ def _get_mixing_indices(size, seed=None, name=None): seed=seed_stream()) # Shift second if it is on top of or to the right of first - second = tf.compat.v1.where(first < second, x=second, y=second + 1) + second = tf1.where(first < second, x=second, y=second + 1) smaller = tf.math.minimum(first, second) larger = tf.math.maximum(first, second) # Shift the third one so it does not coincide with either the first or the # second number. Assuming first < second, shift by 1 if the number is in # [first, second) and by 2 if the number is greater than or equal to the # second. 
- third = tf.compat.v1.where(third < smaller, x=third, y=third + 1) - third = tf.compat.v1.where(third < larger, x=third, y=third + 1) + third = tf1.where(third < smaller, x=third, y=third + 1) + third = tf1.where(third < larger, x=third, y=third + 1) sample = tf.stack([first, second, third], axis=1) to_avoid = tf.expand_dims(tf.range(size), axis=-1) - sample = tf.compat.v1.where(sample < to_avoid, x=sample, y=sample + 1) + sample = tf1.where(sample < to_avoid, x=sample, y=sample + 1) return sample diff --git a/tensorflow_probability/python/optimizer/differential_evolution_test.py b/tensorflow_probability/python/optimizer/differential_evolution_test.py index 1936f62afa..ecd472cc39 100644 --- a/tensorflow_probability/python/optimizer/differential_evolution_test.py +++ b/tensorflow_probability/python/optimizer/differential_evolution_test.py @@ -22,13 +22,15 @@ from scipy.stats import special_ortho_group -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class DifferentialEvolutionTest(tf.test.TestCase): +class DifferentialEvolutionTest(test_case.TestCase): """Tests for Differential Evolution optimization algorithm.""" def test_quadratic_bowl_2d(self): diff --git a/tensorflow_probability/python/optimizer/lbfgs.py b/tensorflow_probability/python/optimizer/lbfgs.py index 9bc71b8110..ce1c5b3ace 100644 --- a/tensorflow_probability/python/optimizer/lbfgs.py +++ b/tensorflow_probability/python/optimizer/lbfgs.py @@ -30,7 +30,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import prefer_static @@ -204,7 +205,7 @@ def quadratic(x): if stopping_condition is None: stopping_condition = bfgs_utils.converged_all - with tf.compat.v1.name_scope(name, 'minimize', [initial_position, tolerance]): + with tf1.name_scope(name, 'minimize', [initial_position, tolerance]): initial_position = tf.convert_to_tensor( value=initial_position, name='initial_position') dtype = initial_position.dtype.base_dtype @@ -463,4 +464,4 @@ def _queue_push(queue, should_update, new_vecs): update_pattern = tf.broadcast_to( should_update[tf.newaxis, ..., tf.newaxis], distribution_util.prefer_static_shape(queue)) - return tf.compat.v1.where(update_pattern, new_queue, queue) + return tf1.where(update_pattern, new_queue, queue) diff --git a/tensorflow_probability/python/optimizer/lbfgs_test.py b/tensorflow_probability/python/optimizer/lbfgs_test.py index a34c45fae4..a70e573cdb 100644 --- a/tensorflow_probability/python/optimizer/lbfgs_test.py +++ b/tensorflow_probability/python/optimizer/lbfgs_test.py @@ -22,8 +22,11 @@ import numpy as np from scipy.stats import special_ortho_group -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -39,7 +42,7 @@ def _norm(x): @test_util.run_all_in_graph_and_eager_modes -class LBfgsTest(tf.test.TestCase): +class LBfgsTest(test_case.TestCase): """Tests for LBFGS optimization algorithm.""" def 
test_quadratic_bowl_2d(self): @@ -363,7 +366,7 @@ def quadratic(x): # Test with a vector of unknown dimension, and a fully unknown shape. for shape in ([None], None): - start = tf.compat.v1.placeholder(tf.float32, shape=shape) + start = tf1.placeholder(tf.float32, shape=shape) lbfgs_op = tfp.optimizer.lbfgs_minimize( quadratic, initial_position=start, tolerance=1e-8) self.assertFalse(lbfgs_op.position.shape.is_fully_defined()) diff --git a/tensorflow_probability/python/optimizer/linesearch/BUILD b/tensorflow_probability/python/optimizer/linesearch/BUILD index 75a593249a..56b283df08 100644 --- a/tensorflow_probability/python/optimizer/linesearch/BUILD +++ b/tensorflow_probability/python/optimizer/linesearch/BUILD @@ -54,5 +54,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py b/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py index d6375de1c9..68497a30a1 100644 --- a/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py +++ b/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py @@ -33,7 +33,8 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import prefer_static from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl @@ -246,7 +247,7 @@ def value_and_gradients_function(x): equal to those of `left` on batch members where converged is True. Otherwise, it corresponds to the last interval computed. """ - with tf.compat.v1.name_scope(name, 'hager_zhang', [ + with tf1.name_scope(name, 'hager_zhang', [ initial_step_size, value_at_initial_step, value_at_zero, converged, threshold_use_approximate_wolfe_condition, shrinkage_param, expansion_param, sufficient_decrease_param, curvature_param]): @@ -310,7 +311,7 @@ def _cond(i, val_c, to_fix): return (i < iter_max) & tf.reduce_any(input_tensor=to_fix) def _body(i, val_c, to_fix): - next_c = tf.compat.v1.where(to_fix, val_c.x * step_size_shrink_param, + next_c = tf1.where(to_fix, val_c.x * step_size_shrink_param, val_c.x) next_val_c = value_and_gradients_function(next_c) still_to_fix = to_fix & ~hzl.is_finite(next_val_c) @@ -659,7 +660,7 @@ def _to_str(x): """Converts a bool tensor to a string with True/False values.""" x = tf.convert_to_tensor(value=x) if x.dtype == tf.bool: - return tf.compat.v1.where(x, tf.fill(x.shape, 'True'), + return tf1.where(x, tf.fill(x.shape, 'True'), tf.fill(x.shape, 'False')) return x @@ -679,4 +680,4 @@ def _print(pass_through_tensor, values): flat_values.append(_to_str(v)) continue flat_values.append(_to_str(value)) - return tf.compat.v1.Print(pass_through_tensor, flat_values) + return tf1.Print(pass_through_tensor, flat_values) diff --git a/tensorflow_probability/python/optimizer/linesearch/hager_zhang_test.py b/tensorflow_probability/python/optimizer/linesearch/hager_zhang_test.py index e990867423..9220d93c26 100644 --- a/tensorflow_probability/python/optimizer/linesearch/hager_zhang_test.py +++ b/tensorflow_probability/python/optimizer/linesearch/hager_zhang_test.py @@ -21,8 +21,11 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework 
import test_util # pylint: disable=g-direct-tensorflow-import @@ -43,7 +46,7 @@ def _is_approx_wolfe(_, f_x, df_x, f_0, df_0, delta, sigma, epsilon): @test_util.run_all_in_graph_and_eager_modes -class HagerZhangTest(tf.test.TestCase): +class HagerZhangTest(test_case.TestCase): """Tests for Hager Zhang line search algorithm.""" def test_quadratic(self): @@ -202,12 +205,12 @@ def _val_and_grad_fn(x): def _test_eval_count_graph(self): starts = [0.1, 4.0] def get_fn(): - eval_count = tf.compat.v2.Variable(0) + eval_count = tf.Variable(0) def _fdf(x): # Enabling locking is critical here. Otherwise, there are race # conditions between various call sites which cause some of the # invocations to be missed. - inc = tf.compat.v1.assign_add(eval_count, 1, use_locking=True) + inc = tf1.assign_add(eval_count, 1, use_locking=True) with tf.control_dependencies([inc]): f = x * x - 2 * x + 1 df = 2 * (x - 1) @@ -218,7 +221,7 @@ def _fdf(x): fdf, counter = get_fn() results = tfp.optimizer.linesearch.hager_zhang( fdf, initial_step_size=tf.constant(start)) - init = tf.compat.v1.global_variables_initializer() + init = tf1.global_variables_initializer() with self.cached_session() as session: session.run(init) results = session.run(results) diff --git a/tensorflow_probability/python/optimizer/linesearch/internal/BUILD b/tensorflow_probability/python/optimizer/linesearch/internal/BUILD index 7c764aa456..26724a85f5 100644 --- a/tensorflow_probability/python/optimizer/linesearch/internal/BUILD +++ b/tensorflow_probability/python/optimizer/linesearch/internal/BUILD @@ -51,6 +51,7 @@ py_test( ":hager_zhang_lib", # numpy dep, # tensorflow dep, + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/math:gradient", ], ) diff --git a/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py b/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py index 5b76bbc95a..6292d4e141 100644 --- a/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py +++ b/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib.py @@ -32,14 +32,15 @@ import collections -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import prefer_static def val_where(cond, tval, fval): """Like tf.where but works on namedtuples.""" if isinstance(tval, tf.Tensor): - return tf.compat.v1.where(cond, tval, fval) + return tf1.where(cond, tval, fval) elif isinstance(tval, tuple): cls = type(tval) return cls(*(val_where(cond, t, f) for t, f in zip(tval, fval))) @@ -137,7 +138,7 @@ def secant2(value_and_gradients_function, right: Return value of value_and_gradients_function at the updated right end point of the interval. """ - with tf.compat.v1.name_scope(name, 'secant2', [ + with tf1.name_scope(name, 'secant2', [ val_0, search_interval, f_lim, sufficient_decrease_param, curvature_param]): # This will always be s.t.
left <= c <= right @@ -202,9 +203,9 @@ def _secant2_inner(value_and_gradients_function, updated_right = active & tf.equal(val_right.x, val_c.x) is_new = updated_left | updated_right - next_c = tf.compat.v1.where(updated_left, _secant(initial_args.left, + next_c = tf1.where(updated_left, _secant(initial_args.left, val_left), val_c.x) - next_c = tf.compat.v1.where(updated_right, + next_c = tf1.where(updated_right, _secant(initial_args.right, val_right), next_c) in_range = (val_left.x <= next_c) & (next_c <= val_right.x) diff --git a/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib_test.py b/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib_test.py index 417bdf2496..2340b61804 100644 --- a/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib_test.py +++ b/tensorflow_probability/python/optimizer/linesearch/internal/hager_zhang_lib_test.py @@ -20,11 +20,11 @@ import collections import numpy as np -import tensorflow as tf +import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math.gradient import value_and_gradient from tensorflow_probability.python.optimizer.linesearch.internal import hager_zhang_lib as hzl - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @@ -86,7 +86,7 @@ def test_function_x_y_dy(x, y, dy, eps=0.01): @test_util.run_all_in_graph_and_eager_modes -class HagerZhangLibTest(tf.test.TestCase): +class HagerZhangLibTest(test_case.TestCase): def test_secant2_batching_vs_mapping(self): # We build a simple example function with 2 batches, one where the wolfe diff --git a/tensorflow_probability/python/optimizer/nelder_mead.py b/tensorflow_probability/python/optimizer/nelder_mead.py index 1a1bc38f45..3cf850a337 100644 --- a/tensorflow_probability/python/optimizer/nelder_mead.py +++ b/tensorflow_probability/python/optimizer/nelder_mead.py @@ -28,7 +28,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import prefer_static @@ -247,7 +248,7 @@ def sqrt_quadratic(x): supplied. 2. If `initial_simplex` and `step_sizes` are both specified. """ - with tf.compat.v1.name_scope(name, 'minimize', [ + with tf1.name_scope(name, 'minimize', [ initial_simplex, initial_vertex, step_sizes, objective_at_initial_simplex, objective_at_initial_vertex, func_tolerance, position_tolerance ]): @@ -353,7 +354,7 @@ def nelder_mead_one_step(current_simplex, shrinkage=None, name=None): """A single iteration of the Nelder Mead algorithm.""" - with tf.compat.v1.name_scope(name, 'nelder_mead_one_step'): + with tf1.name_scope(name, 'nelder_mead_one_step'): domain_dtype = current_simplex.dtype.base_dtype order = tf.argsort( current_objective_values, direction='ASCENDING', stable=True) @@ -757,7 +758,7 @@ def _default_step_sizes(reference_vertex): small_sizes = tf.ones_like(reference_vertex) * 0.00025 # Step size to choose when the coordinate is non-zero. 
large_sizes = reference_vertex * 0.05 - return tf.compat.v1.where( + return tf1.where( tf.abs(reference_vertex) < _EPSILON, small_sizes, large_sizes) diff --git a/tensorflow_probability/python/optimizer/nelder_mead_test.py b/tensorflow_probability/python/optimizer/nelder_mead_test.py index a3d901c048..a15b45e212 100644 --- a/tensorflow_probability/python/optimizer/nelder_mead_test.py +++ b/tensorflow_probability/python/optimizer/nelder_mead_test.py @@ -22,13 +22,15 @@ from scipy.stats import special_ortho_group -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class NelderMeadTest(tf.test.TestCase): +class NelderMeadTest(test_case.TestCase): """Tests for Nelder-Mead optimization algorithm.""" def test_quadratic_bowl_2d(self): diff --git a/tensorflow_probability/python/optimizer/proximal_hessian_sparse.py b/tensorflow_probability/python/optimizer/proximal_hessian_sparse.py index 06a0338fce..8b9dd85385 100644 --- a/tensorflow_probability/python/optimizer/proximal_hessian_sparse.py +++ b/tensorflow_probability/python/optimizer/proximal_hessian_sparse.py @@ -24,7 +24,8 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import prefer_static from tensorflow_probability.python.math.generic import soft_threshold @@ -85,7 +86,7 @@ def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index): `sparse_or_dense_matrix`. """ if isinstance(sparse_or_dense_matrix, - (tf.SparseTensor, tf.compat.v1.SparseTensorValue)): + (tf.SparseTensor, tf1.SparseTensorValue)): # TODO(b/111924846): Implement better (ideally in a way that allows us to # eliminate the `num_rows` arg, if possible). num_rows = _get_shape(sparse_or_dense_matrix)[-2] @@ -234,7 +235,7 @@ def minimize_one_step(gradient_unregularized_loss, tolerance, learning_rate, ] - with tf.compat.v1.name_scope(name, 'minimize_one_step', graph_deps): + with tf1.name_scope(name, 'minimize_one_step', graph_deps): x_shape = _get_shape(x_start) batch_shape = x_shape[:-1] dims = x_shape[-1] @@ -353,7 +354,7 @@ def _loop_body( # pylint: disable=missing-docstring # computed incrementally, where x_update_end and x_update_start are as # defined in the convergence criteria. Accordingly, we reset # x_update_diff_norm_sq to zero at the beginning of each sweep. 
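Reviewer note: as a usage reference for the `minimize` entry point touched in the `nelder_mead.py` hunks above, the `sqrt_quadratic` docstring example boils down to the following sketch (argument values are illustrative):

```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp


# Nelder-Mead is derivative-free: the objective returns values only.
def sqrt_quadratic(x):
  return tf.sqrt(tf.reduce_sum(input_tensor=x**2, axis=-1))

results = tfp.optimizer.nelder_mead_minimize(
    sqrt_quadratic,
    initial_vertex=tf.constant([6.0, -21.0], dtype=tf.float64),
    func_tolerance=1e-8,
    batch_evaluate_objective=True)
# results.position converges toward the origin, the global minimum.
```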
- x_update_diff_norm_sq = tf.compat.v1.where( + x_update_diff_norm_sq = tf1.where( tf.equal(coord, 0), tf.zeros_like(x_update_diff_norm_sq), x_update_diff_norm_sq) @@ -559,7 +560,7 @@ def minimize(grad_and_hessian_loss_fn, tolerance, learning_rate, ], - with tf.compat.v1.name_scope(name, 'minimize', graph_deps): + with tf1.name_scope(name, 'minimize', graph_deps): def _loop_cond(x_start, converged, iter_): del x_start diff --git a/tensorflow_probability/python/optimizer/proximal_hessian_sparse_test.py b/tensorflow_probability/python/optimizer/proximal_hessian_sparse_test.py index 924c7b2795..96264b8861 100644 --- a/tensorflow_probability/python/optimizer/proximal_hessian_sparse_test.py +++ b/tensorflow_probability/python/optimizer/proximal_hessian_sparse_test.py @@ -21,17 +21,18 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -tfd = tfp.distributions +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top class _ProximalHessianTest(object): def _make_placeholder(self, x): - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=x, shape=(x.shape if self.use_static_shape else None)) def _adjust_dtype_and_shape_hints(self, x): @@ -212,28 +213,28 @@ def _grad_and_hessian_unregularized_loss_fn(x): @test_util.run_all_in_graph_and_eager_modes -class ProximalHessianTestStaticShapeFloat32(tf.test.TestCase, +class ProximalHessianTestStaticShapeFloat32(test_case.TestCase, _ProximalHessianTest): dtype = tf.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class ProximalHessianTestDynamicShapeFloat32(tf.test.TestCase, +class ProximalHessianTestDynamicShapeFloat32(test_case.TestCase, _ProximalHessianTest): dtype = tf.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes -class ProximalHessianTestStaticShapeFloat64(tf.test.TestCase, +class ProximalHessianTestStaticShapeFloat64(test_case.TestCase, _ProximalHessianTest): dtype = tf.float64 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes -class ProximalHessianTestDynamicShapeFloat64(tf.test.TestCase, +class ProximalHessianTestDynamicShapeFloat64(test_case.TestCase, _ProximalHessianTest): dtype = tf.float64 use_static_shape = False diff --git a/tensorflow_probability/python/optimizer/sgld.py b/tensorflow_probability/python/optimizer/sgld.py index 9f36bf2264..602cf1b7d0 100644 --- a/tensorflow_probability/python/optimizer/sgld.py +++ b/tensorflow_probability/python/optimizer/sgld.py @@ -18,7 +18,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import dtype_util @@ -31,7 +32,7 @@ ] -class StochasticGradientLangevinDynamics(tf.compat.v2.optimizers.Optimizer): +class StochasticGradientLangevinDynamics(tf.optimizers.Optimizer): """An optimizer module for stochastic gradient Langevin dynamics. 
This implements the preconditioned Stochastic Gradient Langevin Dynamics @@ -67,8 +68,8 @@ class StochasticGradientLangevinDynamics(tf.compat.v2.optimizers.Optimizer): # Loss is defined through the Cholesky decomposition chol = tf.linalg.cholesky(true_cov) - var_1 = tf.compat.v2.Variable(name='var_1', initial_value=[1., 1.]) - var_2 = tf.compat.v2.Variable(name='var_2', initial_value=[1.]) + var_1 = tf.Variable(name='var_1', initial_value=[1., 1.]) + var_2 = tf.Variable(name='var_2', initial_value=[1.]) def loss_fn(): var = tf.concat([var_1, var_2], axis=-1) @@ -76,7 +77,7 @@ def loss_fn(): return tf.linalg.matvec(loss_part, var, transpose_a=True) # Set up the learning rate with a polynomial decay - step = tf.compat.v2.Variable(0, dtype=tf.int64) + step = tf.Variable(0, dtype=tf.int64) starter_learning_rate = .3 end_learning_rate = 1e-4 decay_steps = 1e4 @@ -154,7 +155,7 @@ def __init__(self, name=None, parallel_iterations=10): default_name = 'StochasticGradientLangevinDynamics' - with tf.compat.v1.name_scope(name, default_name, [ + with tf1.name_scope(name, default_name, [ learning_rate, preconditioner_decay_rate, data_size, burnin, diagonal_bias ]): @@ -180,31 +181,31 @@ def __init__(self, self._parallel_iterations = parallel_iterations self._preconditioner_decay_rate = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._preconditioner_decay_rate, message='`preconditioner_decay_rate` must be non-negative'), - tf.compat.v1.assert_less_equal( + tf1.assert_less_equal( self._preconditioner_decay_rate, 1., message='`preconditioner_decay_rate` must be at most 1.'), ], self._preconditioner_decay_rate) self._data_size = distribution_util.with_dependencies([ - tf.compat.v1.assert_greater( + tf1.assert_greater( self._data_size, 0, message='`data_size` must be greater than zero') ], self._data_size) self._burnin = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._burnin, message='`burnin` must be non-negative'), - tf.compat.v1.assert_integer( + tf1.assert_integer( self._burnin, message='`burnin` must be an integer') ], self._burnin) self._diagonal_bias = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._diagonal_bias, message='`diagonal_bias` must be non-negative') ], self._diagonal_bias) @@ -226,7 +227,7 @@ def _prepare(self, var_list): # want to decay the learning rate dynamically. 
self._learning_rate_tensor = distribution_util.with_dependencies( [ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._learning_rate, message='`learning_rate` must be non-negative') ], @@ -261,7 +262,7 @@ def variable_scope(self): def _apply_noisy_update(self, mom, grad, var, indices=None): # Compute and apply the gradient update following # preconditioned Langevin dynamics - stddev = tf.compat.v1.where( + stddev = tf1.where( tf.squeeze(self.iterations > tf.cast(self._burnin, tf.int64)), tf.cast(tf.math.rsqrt(self._learning_rate), grad.dtype), tf.zeros([], grad.dtype)) diff --git a/tensorflow_probability/python/optimizer/sgld_test.py b/tensorflow_probability/python/optimizer/sgld_test.py index b1c36d24e7..f78c5fb50a 100644 --- a/tensorflow_probability/python/optimizer/sgld_test.py +++ b/tensorflow_probability/python/optimizer/sgld_test.py @@ -19,18 +19,20 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.math import diag_jacobian -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class StochasticGradientLangevinDynamicsOptimizerTest(tf.test.TestCase): +class StochasticGradientLangevinDynamicsOptimizerTest(test_case.TestCase): def testBasic(self): if tf.executing_eagerly(): @@ -38,8 +40,8 @@ def testBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3., 4.], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3., 4.], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.53 @@ -48,7 +50,7 @@ def testBasic(self): sgd_op = sgd_optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1)) @@ -76,12 +78,12 @@ def testBasicMultiInstance(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3., 4.], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3., 4.], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) - vara = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - varb = tf.compat.v2.Variable([3., 4.], dtype=dtype) + vara = tf.Variable([1.1, 2.1], dtype=dtype) + varb = tf.Variable([3., 4.], dtype=dtype) gradsa = tf.constant([0.1, 0.1], dtype=dtype) gradsb = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.5 @@ -93,7 +95,7 @@ def testBasicMultiInstance(self): 3., preconditioner_decay_rate=decay_rate) sgd_op2 = sgd_optimizer2.apply_gradients( zip([gradsa, gradsb], [vara, varb])) - 
self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1)) @@ -133,8 +135,8 @@ def testTensorLearningRate(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3., 4.], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3., 4.], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) lrate = tf.constant(3.0) @@ -143,7 +145,7 @@ def testTensorLearningRate(self): lrate, preconditioner_decay_rate=tf.constant( decay_rate)).apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1)) @@ -172,10 +174,10 @@ def testGradWrtRef(self): with self.cached_session(): opt = tfp.optimizer.StochasticGradientLangevinDynamics(3.0) values = [1., 3.] - vars_ = [tf.compat.v2.Variable([v], dtype=dtype) for v in values] + vars_ = [tf.Variable([v], dtype=dtype) for v in values] loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop grads_and_vars = opt._compute_gradients(loss, vars_) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) for grad, _ in grads_and_vars: self.assertAllCloseAccordingToType([1.], self.evaluate(grad)) @@ -185,7 +187,7 @@ def testBurnin(self): for burnin_dtype in [tf.int8, tf.int16, tf.int32, tf.int64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=tf.float32) + var0 = tf.Variable([1.1, 2.1], dtype=tf.float32) grads0 = tf.constant([0.1, 0.1], dtype=tf.float32) decay_rate = 0.53 sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics( @@ -194,7 +196,7 @@ def testBurnin(self): burnin=tf.constant(10, dtype=burnin_dtype)) sgd_op = sgd_optimizer.apply_gradients([(grads0, var0)]) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Validate that iterations is initialized to 0. 
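Reviewer note: the docstring change above rewrites the SGLD example in v2 terms. Stripped to its essentials, usage looks like this eager-mode sketch with our own toy loss (parameter values are illustrative):

```python
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

var = tf.Variable([1.0, 2.0])
optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(
    learning_rate=0.3, preconditioner_decay_rate=0.95,
    data_size=1000, burnin=25)


def loss_fn():
  return tf.reduce_sum(input_tensor=(var - 3.0)**2)

# Each call takes one preconditioned gradient step plus injected Gaussian
# noise; per `_apply_noisy_update` above, the noise is suppressed until
# `iterations` passes `burnin`.
optimizer.minimize(loss_fn, var_list=[var])
```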
self.assertAllCloseAccordingToType( 0, self.evaluate(sgd_optimizer.iterations)) @@ -210,10 +212,10 @@ def testWithGlobalStep(self): for dtype in [tf.float32, tf.float64]: with self.cached_session(): - step = tf.compat.v2.Variable(0, dtype=tf.int64) + step = tf.Variable(0, dtype=tf.int64) - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3., 4.], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3., 4.], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.1 @@ -223,7 +225,7 @@ def testWithGlobalStep(self): sgd_opt.iterations = step sgd_op = sgd_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) @@ -252,8 +254,8 @@ def testSparseBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([[1.1], [2.1]], dtype=dtype) - var1 = tf.compat.v2.Variable([[3.], [4.]], dtype=dtype) + var0 = tf.Variable([[1.1], [2.1]], dtype=dtype) + var1 = tf.Variable([[3.], [4.]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) @@ -264,7 +266,7 @@ def testSparseBasic(self): sgd_op = tfp.optimizer.StochasticGradientLangevinDynamics( 3., preconditioner_decay_rate=decay_rate).apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.1], [2.1]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.], [4.]], self.evaluate(var1)) @@ -295,8 +297,8 @@ def testPreconditionerComputedCorrectly(self): # Target distribution is defined through the Cholesky decomposition chol = tf.linalg.cholesky(true_cov) target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol) - var_1 = tf.compat.v2.Variable(name='var_1', initial_value=[1., 1.]) - var_2 = tf.compat.v2.Variable(name='var_2', initial_value=[1.]) + var_1 = tf.Variable(name='var_1', initial_value=[1., 1.]) + var_2 = tf.Variable(name='var_2', initial_value=[1.]) var = [var_1, var_2] @@ -338,7 +340,7 @@ def target_fn(x, y): var_true = [v - learning_rate * 0.5 * (p * g - p_g) for v, p, g, p_g in zip(var, preconditioner, grads, preconditioner_grads)] - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) var_true_ = self.evaluate(var_true) self.evaluate(step) var_ = self.evaluate(var) # new `var` after one SGLD step @@ -352,14 +354,14 @@ def testDiffusionBehavesCorrectly(self): with self.cached_session(): # Set up random seed for the optimizer - tf.compat.v1.set_random_seed(42) + tf1.set_random_seed(42) dtype = np.float32 true_mean = dtype([0, 0, 0]) true_cov = dtype([[1, 0.25, 0.25], [0.25, 1, 0.25], [0.25, 0.25, 1]]) # Loss is defined through the Cholesky decomposition chol = tf.linalg.cholesky(true_cov) - var_1 = tf.compat.v2.Variable(name='var_1', initial_value=[1., 1.]) - var_2 = tf.compat.v2.Variable(name='var_2', initial_value=[1.]) + var_1 = tf.Variable(name='var_1', initial_value=[1., 1.]) + var_2 = tf.Variable(name='var_2', initial_value=[1.]) # Loss function def loss_fn(): @@ -368,11 +370,11 @@ def loss_fn(): return 
tf.linalg.matvec(loss_part, var, transpose_a=True) # Set up the learning rate with a polynomial decay - global_step = tf.compat.v1.train.get_or_create_global_step() + global_step = tf1.train.get_or_create_global_step() starter_learning_rate = .3 end_learning_rate = 1e-4 decay_steps = 1e4 - learning_rate = tf.compat.v1.train.polynomial_decay( + learning_rate = tf1.train.polynomial_decay( starter_learning_rate, global_step, decay_steps, @@ -389,7 +391,7 @@ def loss_fn(): training_steps = 5000 # Record the steps as and treat them as samples samples = [np.zeros([training_steps, 2]), np.zeros([training_steps, 1])] - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) for step in range(training_steps): self.evaluate(optimizer) sample = [self.evaluate(var_1), self.evaluate(var_2)] diff --git a/tensorflow_probability/python/optimizer/variational_sgd.py b/tensorflow_probability/python/optimizer/variational_sgd.py index 6b04184e7b..5a6f73a02c 100644 --- a/tensorflow_probability/python/optimizer/variational_sgd.py +++ b/tensorflow_probability/python/optimizer/variational_sgd.py @@ -18,7 +18,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util from tensorflow_probability.python.internal import dtype_util @@ -30,7 +31,7 @@ ] -class VariationalSGD(tf.compat.v2.optimizers.Optimizer): +class VariationalSGD(tf.optimizers.Optimizer): """An optimizer module for constant stochastic gradient descent. This implements an optimizer module for the constant stochastic gradient @@ -89,7 +90,7 @@ def __init__(self, use_single_learning_rate=False, name=None): default_name = 'VariationalSGD' - with tf.compat.v1.name_scope(name, default_name, [ + with tf1.name_scope(name, default_name, [ max_learning_rate, preconditioner_decay_rate, batch_size, burnin, burnin_max_learning_rate ]): @@ -111,44 +112,44 @@ def __init__(self, self._use_single_learning_rate = use_single_learning_rate self._preconditioner_decay_rate = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._preconditioner_decay_rate, message='`preconditioner_decay_rate` must be non-negative'), - tf.compat.v1.assert_less_equal( + tf1.assert_less_equal( self._preconditioner_decay_rate, 1., message='`preconditioner_decay_rate` must be at most 1.'), ], self._preconditioner_decay_rate) self._batch_size = distribution_util.with_dependencies([ - tf.compat.v1.assert_greater( + tf1.assert_greater( self._batch_size, 0, message='`batch_size` must be greater than zero') ], self._batch_size) self._total_num_examples = distribution_util.with_dependencies([ - tf.compat.v1.assert_greater( + tf1.assert_greater( self._total_num_examples, 0, message='`total_num_examples` must be greater than zero') ], self._total_num_examples) self._burnin = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._burnin, message='`burnin` must be non-negative'), - tf.compat.v1.assert_integer( + tf1.assert_integer( self._burnin, message='`burnin` must be an integer') ], self._burnin) self._burnin_max_learning_rate = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._burnin_max_learning_rate, message='`burnin_max_learning_rate` must be non-negative') ], self._burnin_max_learning_rate) 
self._max_learning_rate = distribution_util.with_dependencies([ - tf.compat.v1.assert_non_negative( + tf1.assert_non_negative( self._max_learning_rate, message='`max_learning_rate` must be non-negative') ], self._max_learning_rate) @@ -185,7 +186,7 @@ def _get_coordinatewise_learning_rate(self, grad, var): # via Welford's algorithm if isinstance(grad, tf.Tensor): delta = grad - avg_first - first_moment_update = avg_first.assign_add(delta * tf.compat.v1.where( + first_moment_update = avg_first.assign_add(delta * tf1.where( self.iterations < 1, tf.cast(1, var.dtype), 1. - decay_tensor)) with tf.control_dependencies([first_moment_update]): @@ -197,13 +198,13 @@ def _get_coordinatewise_learning_rate(self, grad, var): tf.clip_by_value(avg_second, 1e-12, 1e12)) elif isinstance(grad, tf.IndexedSlices): delta = grad.values - tf.gather_nd(avg_first, grad.indices) - first_moment_update = tf.compat.v1.scatter_add( + first_moment_update = tf1.scatter_add( avg_first, grad.indices, - delta * tf.compat.v1.where(self.iterations < 1, tf.cast( + delta * tf1.where(self.iterations < 1, tf.cast( 1., var.dtype), 1. - decay_tensor)) with tf.control_dependencies([first_moment_update]): - avg_second = tf.compat.v1.scatter_add( + avg_second = tf1.scatter_add( avg_second, grad.indices, tf.cast(self.iterations < 1, var.dtype) * -(1. - decay_tensor) * (tf.gather_nd(avg_second, grad.indices) - @@ -226,7 +227,7 @@ def _get_coordinatewise_learning_rate(self, grad, var): diag_preconditioner) def _resource_apply_dense(self, grad, var): - max_learning_rate = tf.compat.v1.where( + max_learning_rate = tf1.where( self.iterations < tf.cast(self._burnin, tf.int64), self._burnin_max_learning_rate, self._max_learning_rate) @@ -242,7 +243,7 @@ def _resource_apply_dense(self, grad, var): use_locking=self._use_locking) def _resource_apply_sparse(self, grad, var, indices): - max_learning_rate = tf.compat.v1.where( + max_learning_rate = tf1.where( self.iterations < tf.cast(self._burnin, tf.int64), self._burnin_max_learning_rate, self._max_learning_rate) diff --git a/tensorflow_probability/python/optimizer/variational_sgd_test.py b/tensorflow_probability/python/optimizer/variational_sgd_test.py index 72f70e373f..14737c82f5 100644 --- a/tensorflow_probability/python/optimizer/variational_sgd_test.py +++ b/tensorflow_probability/python/optimizer/variational_sgd_test.py @@ -18,20 +18,23 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp + +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class VariationalSGDTest(tf.test.TestCase): +class VariationalSGDTest(test_case.TestCase): def testBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.53 @@ -44,7 +47,7 @@ def testBasic(self): use_single_learning_rate=True) if not tf.executing_eagerly(): sgd_op = sgd_opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + 
self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) @@ -63,12 +66,12 @@ def testBasic(self): def testBasicMultiInstance(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) - vara = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - varb = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + vara = tf.Variable([1.1, 2.1], dtype=dtype) + varb = tf.Variable([3.0, 4.0], dtype=dtype) gradsa = tf.constant([0.1, 0.1], dtype=dtype) gradsb = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.5 @@ -92,7 +95,7 @@ def testBasicMultiInstance(self): zip([grads0, grads1], [var0, var1])) sgd_op2 = optimizer2.apply_gradients( zip([gradsa, gradsb], [vara, varb])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -126,8 +129,8 @@ def testTensorLearningRate(self): # for dtype in [tf.half, tf.float32, tf.float64]: for dtype in [tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) lrate = tf.constant(3.0) @@ -143,7 +146,7 @@ def testTensorLearningRate(self): if not tf.executing_eagerly(): sgd_op = optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -161,7 +164,7 @@ def testTensorLearningRate(self): def testBurnin(self): for burnin_dtype in [tf.int8, tf.int16, tf.int32, tf.int64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=tf.float32) + var0 = tf.Variable([1.1, 2.1], dtype=tf.float32) grads0 = tf.constant([0.1, 0.1], dtype=tf.float32) decay_rate = 0.53 sgd_optimizer = tfp.optimizer.VariationalSGD( @@ -176,7 +179,7 @@ def testBurnin(self): if not tf.executing_eagerly(): sgd_op = sgd_optimizer.apply_gradients([(grads0, var0)]) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Validate iterations is initialized to 0. 
self.assertAllCloseAccordingToType( @@ -197,11 +200,11 @@ def testTensorDecayLearningRate(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) - lrate = tf.compat.v2.Variable(3.0) + lrate = tf.Variable(3.0) decay_rate = 0.5 batch_size = 2 total_num_examples = 10 @@ -214,7 +217,7 @@ def testTensorDecayLearningRate(self): if not tf.executing_eagerly(): sgd_op = optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -253,19 +256,19 @@ def testGradWrtRef(self): with self.cached_session(): opt = tfp.optimizer.VariationalSGD(1, 1, max_learning_rate=1.0) values = [1.0, 3.0] - vars_ = [tf.compat.v2.Variable([v], dtype=dtype) for v in values] + vars_ = [tf.Variable([v], dtype=dtype) for v in values] loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop grads_and_vars = opt._compute_gradients(loss, vars_) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) for grad, _ in grads_and_vars: self.assertAllCloseAccordingToType([1.0], self.evaluate(grad)) def testWithGlobalStep(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - global_step = tf.compat.v2.Variable(0, dtype=tf.int64) - var0 = tf.compat.v2.Variable([1.1, 2.1], dtype=dtype) - var1 = tf.compat.v2.Variable([3.0, 4.0], dtype=dtype) + global_step = tf.Variable(0, dtype=tf.int64) + var0 = tf.Variable([1.1, 2.1], dtype=dtype) + var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) decay_rate = 0.1 @@ -283,7 +286,7 @@ def testWithGlobalStep(self): sgd_op = sgd_optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) init_step_value = self.evaluate(global_step) # Fetch params to validate initial values @@ -308,8 +311,8 @@ def testWithGlobalStep(self): def testSparseBasic(self): for dtype in [tf.half, tf.float32, tf.float64]: with self.cached_session(): - var0 = tf.compat.v2.Variable([[1.1], [2.1]], dtype=dtype) - var1 = tf.compat.v2.Variable([[3.0], [4.0]], dtype=dtype) + var0 = tf.Variable([[1.1], [2.1]], dtype=dtype) + var1 = tf.Variable([[3.0], [4.0]], dtype=dtype) grads0 = tf.IndexedSlices( tf.constant([0.1], shape=[1, 1], dtype=dtype), tf.constant([0]), tf.constant([2, 1])) @@ -329,7 +332,7 @@ def testSparseBasic(self): sgd_op = sgd_optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.1], [2.1]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1)) diff --git a/tensorflow_probability/python/positive_semidefinite_kernels/__init__.py 
b/tensorflow_probability/python/positive_semidefinite_kernels/__init__.py index 6070b149f4..0380cf759e 100644 --- a/tensorflow_probability/python/positive_semidefinite_kernels/__init__.py +++ b/tensorflow_probability/python/positive_semidefinite_kernels/__init__.py @@ -18,6 +18,8 @@ from __future__ import division from __future__ import print_function +import warnings + from tensorflow_probability.python.math.psd_kernels.exp_sin_squared import ExpSinSquared from tensorflow_probability.python.math.psd_kernels.exponentiated_quadratic import ExponentiatedQuadratic from tensorflow_probability.python.math.psd_kernels.feature_scaled import FeatureScaled @@ -35,6 +37,11 @@ from tensorflow.python.util.all_util import remove_undocumented +warnings.warn( + 'tfp.positive_semidefinite_kernels module has been moved to ' + 'tfp.math.psd_kernels. This alias will be deleted on 2019-12-01', + stacklevel=5) + _allowed_symbols = [ 'ExponentiatedQuadratic', 'ExpSinSquared', diff --git a/tensorflow_probability/python/stats/BUILD b/tensorflow_probability/python/stats/BUILD index 1e707fa8da..acde029328 100644 --- a/tensorflow_probability/python/stats/BUILD +++ b/tensorflow_probability/python/stats/BUILD @@ -89,6 +89,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -114,6 +115,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -139,5 +141,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/stats/calibration_test.py b/tensorflow_probability/python/stats/calibration_test.py index d3267c4f8f..2bc1e4c824 100644 --- a/tensorflow_probability/python/stats/calibration_test.py +++ b/tensorflow_probability/python/stats/calibration_test.py @@ -23,9 +23,10 @@ import numpy as np import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case -class CalibrationTest(parameterized.TestCase, tf.test.TestCase): +class CalibrationTest(parameterized.TestCase, test_case.TestCase): _TEMPERATURES = [0.01, 1.0, 5.0] _NLABELS = [2, 4] diff --git a/tensorflow_probability/python/stats/quantiles_test.py b/tensorflow_probability/python/stats/quantiles_test.py index f401f48642..30041600ed 100644 --- a/tensorflow_probability/python/stats/quantiles_test.py +++ b/tensorflow_probability/python/stats/quantiles_test.py @@ -24,12 +24,14 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import + rng = np.random.RandomState(0) @test_util.run_all_in_graph_and_eager_modes -class BincountTest(tf.test.TestCase): +class BincountTest(test_case.TestCase): def test_like_tf_math_bincount_if_axis_is_none(self): arr = rng.randint(0, 10, size=(2, 3, 4)).astype(np.int32) @@ -89,7 +91,7 @@ def test_2d_arr_axis_0_yes_weights(self): @test_util.run_all_in_graph_and_eager_modes -class FindBinsTest(tf.test.TestCase): +class FindBinsTest(test_case.TestCase): def test_1d_array_no_extend_lower_and_upper_dtype_int64(self): x = [-1., 0., 4., 5., 10., 20.] 
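# The test-class changes in this patch follow one mechanical pattern: import
# TFP's internal `test_case` module and subclass `test_case.TestCase` in place
# of `tf.test.TestCase`, keeping the existing graph-and-eager decorator. A
# minimal sketch of a migrated test file, with an illustrative `ExampleTest`
# body that is not part of this change:
#
#   import tensorflow.compat.v2 as tf
#   from tensorflow_probability.python.internal import test_case
#   from tensorflow.python.framework import test_util  # pylint: disable=g-direct-tensorflow-import
#
#   @test_util.run_all_in_graph_and_eager_modes
#   class ExampleTest(test_case.TestCase):
#
#     def test_constant_round_trip(self):
#       x = tf.constant([1., 2.])
#       self.assertAllClose([1., 2.], self.evaluate(x))
#
#   if __name__ == '__main__':
#     tf.test.main()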
@@ -196,7 +198,7 @@ def test_too_few_edges_raises(self): @test_util.run_all_in_graph_and_eager_modes -class HistogramTest(tf.test.TestCase): +class HistogramTest(test_case.TestCase): def test_uniform_dist_in_1d_specify_extend_interval_and_dtype(self): n_samples = 1000 @@ -356,7 +358,7 @@ def test_2d_uniform_reduce_axis_0_edges_is_2d(self): @test_util.run_all_in_graph_and_eager_modes -class PercentileTestWithLowerInterpolation(tf.test.TestCase): +class PercentileTestWithLowerInterpolation(test_case.TestCase): _interpolation = 'lower' @@ -658,7 +660,7 @@ class PercentileTestWithHigherInterpolation( _interpolation = 'higher' -class PercentileTestWithNearestInterpolation(tf.test.TestCase): +class PercentileTestWithNearestInterpolation(test_case.TestCase): """Test separately because np.round and tf.round make different choices.""" _interpolation = 'nearest' @@ -712,7 +714,7 @@ def test_finds_max_of_long_array(self): @test_util.run_all_in_graph_and_eager_modes -class QuantilesTest(tf.test.TestCase): +class QuantilesTest(test_case.TestCase): """Test for quantiles. Most functionality tested implicitly via percentile.""" def test_quartiles_of_vector(self): diff --git a/tensorflow_probability/python/stats/sample_stats_test.py b/tensorflow_probability/python/stats/sample_stats_test.py index 537d5cb80d..1e43ef1eb3 100644 --- a/tensorflow_probability/python/stats/sample_stats_test.py +++ b/tensorflow_probability/python/stats/sample_stats_test.py @@ -19,10 +19,12 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import from tensorflow.python.ops import spectral_ops_test_util # pylint: disable=g-direct-tensorflow-import @@ -227,7 +229,7 @@ def test_normalization(self): @test_util.run_all_in_graph_and_eager_modes -class AutoCorrelationTestStaticShapeFloat32(tf.test.TestCase, +class AutoCorrelationTestStaticShapeFloat32(test_case.TestCase, _AutoCorrelationTest): @property @@ -240,7 +242,7 @@ def use_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class AutoCorrelationTestStaticShapeComplex64(tf.test.TestCase, +class AutoCorrelationTestStaticShapeComplex64(test_case.TestCase, _AutoCorrelationTest): @property @@ -253,7 +255,7 @@ def use_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class AutoCorrelationTestDynamicShapeFloat32(tf.test.TestCase, +class AutoCorrelationTestDynamicShapeFloat32(test_case.TestCase, _AutoCorrelationTest): @property @@ -266,7 +268,7 @@ def use_static_shape(self): @test_util.run_all_in_graph_and_eager_modes -class CovarianceTest(tf.test.TestCase): +class CovarianceTest(test_case.TestCase): def _np_cov_1d(self, x, y): return ((x - x.mean(axis=0)) * (y - y.mean(axis=0))).mean(axis=0) @@ -421,7 +423,7 @@ def test_batch_vector_shape_dtype_ok(self): @test_util.run_all_in_graph_and_eager_modes -class CorrelationTest(tf.test.TestCase): +class CorrelationTest(test_case.TestCase): def _np_corr_1d(self, x, y): assert x.ndim == y.ndim == 1 @@ -481,7 +483,7 @@ def test_batch_vector_sampaxis0_eventaxisn1(self): @test_util.run_all_in_graph_and_eager_modes -class CholeskyCovarianceTest(tf.test.TestCase): +class CholeskyCovarianceTest(test_case.TestCase): def test_batch_vector_sampaxis1_eventaxis2(self): # x.shape = [2, 5000, 2], @@ -510,7 +512,7 @@ def 
test_batch_vector_sampaxis1_eventaxis2(self): @test_util.run_all_in_graph_and_eager_modes -class VarianceTest(tf.test.TestCase): +class VarianceTest(test_case.TestCase): """Light test: Most methods tested implicitly by CovarianceTest.""" def test_independent_uniform_samples(self): @@ -530,7 +532,7 @@ def test_independent_uniform_samples(self): @test_util.run_all_in_graph_and_eager_modes -class StddevTest(tf.test.TestCase): +class StddevTest(test_case.TestCase): """Light test: Most methods tested implicitly by VarianceTest.""" def test_independent_uniform_samples(self): @@ -550,7 +552,7 @@ def test_independent_uniform_samples(self): @test_util.run_all_in_graph_and_eager_modes -class LogAverageProbsTest(tf.test.TestCase): +class LogAverageProbsTest(test_case.TestCase): def test_mathematical_correctness_bernoulli(self): logits = tf.random.normal([10, 3, 4], seed=42) diff --git a/tensorflow_probability/python/sts/BUILD b/tensorflow_probability/python/sts/BUILD index 66f7e3417a..e97fc19310 100644 --- a/tensorflow_probability/python/sts/BUILD +++ b/tensorflow_probability/python/sts/BUILD @@ -67,6 +67,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -90,6 +91,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -113,6 +115,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -137,6 +140,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -160,6 +164,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -182,6 +187,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -204,6 +210,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -226,6 +233,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -250,6 +258,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -272,6 +281,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -294,6 +304,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -316,6 +327,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", "//tensorflow_probability/python/internal:test_util", ], ) @@ -340,5 +352,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/sts/autoregressive.py b/tensorflow_probability/python/sts/autoregressive.py index 70c9f4f183..2fabea6a77 100644 --- a/tensorflow_probability/python/sts/autoregressive.py +++ b/tensorflow_probability/python/sts/autoregressive.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 
+import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -165,7 +166,7 @@ def __init__(self, name: Python `str` name prefixed to ops created by this class. Default value: "AutoregressiveStateSpaceModel". """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'AutoregressiveStateSpaceModel', values=[coefficients, level_scale, observation_noise_scale]) as name: @@ -329,7 +330,7 @@ def __init__(self, name: the name of this model component. Default value: 'Autoregressive'. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'Autoregressive', values=[observed_time_series]) as name: masked_time_series = None diff --git a/tensorflow_probability/python/sts/autoregressive_test.py b/tensorflow_probability/python/sts/autoregressive_test.py index 631a9c7d93..fd97496a90 100644 --- a/tensorflow_probability/python/sts/autoregressive_test.py +++ b/tensorflow_probability/python/sts/autoregressive_test.py @@ -20,13 +20,13 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability import distributions as tfd - +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel from tensorflow_probability.python.sts import LocalLevelStateSpaceModel - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -51,7 +51,7 @@ def ar_explicit_logp(y, coefs, level_scale): return lp -class _AutoregressiveStateSpaceModelTest(tf.test.TestCase): +class _AutoregressiveStateSpaceModelTest(test_case.TestCase): def testEqualsLocalLevel(self): # An AR1 process with coef 1 is just a random walk, equivalent to a local @@ -158,7 +158,7 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/decomposition.py b/tensorflow_probability/python/sts/decomposition.py index d3a7cec9ad..7b6b503dc2 100644 --- a/tensorflow_probability/python/sts/decomposition.py +++ b/tensorflow_probability/python/sts/decomposition.py @@ -20,7 +20,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python.internal import distribution_util as dist_util from tensorflow_probability.python.sts.internal import util as sts_util @@ -74,7 +75,7 @@ def _decompose_from_posterior_marginals( raise ValueError('Model decomposed into components must be an instance of' '`tfp.sts.Sum` (passed model {})'.format(model)) - with tf.compat.v1.name_scope('decompose_from_posterior_marginals'): + with tf1.name_scope('decompose_from_posterior_marginals'): # Extract the component means/covs from the joint latent posterior. 
latent_sizes = [component.latent_size for component in model.components] @@ -198,7 +199,7 @@ def decompose_by_component(model, observed_time_series, parameter_samples): """ - with tf.compat.v1.name_scope('decompose_by_component', + with tf1.name_scope('decompose_by_component', values=[observed_time_series]): [ observed_time_series, @@ -301,7 +302,7 @@ def decompose_forecast_by_component(model, forecast_dist, parameter_samples): """ - with tf.compat.v1.name_scope('decompose_forecast_by_component'): + with tf1.name_scope('decompose_forecast_by_component'): try: forecast_lgssm = forecast_dist.components_distribution forecast_latent_mean, _ = forecast_lgssm._joint_mean() # pylint: disable=protected-access diff --git a/tensorflow_probability/python/sts/decomposition_test.py b/tensorflow_probability/python/sts/decomposition_test.py index d8f6346d55..45ca1de0c9 100644 --- a/tensorflow_probability/python/sts/decomposition_test.py +++ b/tensorflow_probability/python/sts/decomposition_test.py @@ -15,18 +15,20 @@ """Tests for STS decomposition methods.""" # Dependency imports + import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util + from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import tfl = tf.linalg -tfd = tfp.distributions -class _DecompositionTest(tf.test.TestCase): +class _DecompositionTest(test_case.TestCase): def _build_model_and_params(self, num_timesteps, @@ -145,7 +147,7 @@ def _build_tensor(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/dynamic_regression.py b/tensorflow_probability/python/sts/dynamic_regression.py index 6ef62debc8..9d6cfccab3 100644 --- a/tensorflow_probability/python/sts/dynamic_regression.py +++ b/tensorflow_probability/python/sts/dynamic_regression.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -167,7 +168,7 @@ def __init__(self, """ - with tf.compat.v1.name_scope(name, 'DynamicLinearRegressionStateSpaceModel', + with tf1.name_scope(name, 'DynamicLinearRegressionStateSpaceModel', values=[drift_scale]) as name: dtype = dtype_util.common_dtype( @@ -282,7 +283,7 @@ def __init__(self, """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'DynamicLinearRegression', values=[observed_time_series]) as name: dtype = dtype_util.common_dtype( diff --git a/tensorflow_probability/python/sts/dynamic_regression_test.py b/tensorflow_probability/python/sts/dynamic_regression_test.py index 6bbdc6efaa..4064245ae2 100644 --- a/tensorflow_probability/python/sts/dynamic_regression_test.py +++ b/tensorflow_probability/python/sts/dynamic_regression_test.py @@ -19,16 +19,17 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import 
distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import DynamicLinearRegression from tensorflow_probability.python.sts import DynamicLinearRegressionStateSpaceModel from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - class _DynamicLinearRegressionStateSpaceModelTest(object): @@ -138,27 +139,27 @@ def test_matrices_from_component(self): def _build_placeholder(self, ndarray): ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes class DynamicRegressionStateSpaceModelTestStaticShape32( - tf.test.TestCase, _DynamicLinearRegressionStateSpaceModelTest): + test_case.TestCase, _DynamicLinearRegressionStateSpaceModelTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class DynamicRegressionStateSpaceModelTestDynamicShape32( - tf.test.TestCase, _DynamicLinearRegressionStateSpaceModelTest): + test_case.TestCase, _DynamicLinearRegressionStateSpaceModelTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class DynamicRegressionStateSpaceModelTestStaticShape64( - tf.test.TestCase, _DynamicLinearRegressionStateSpaceModelTest): + test_case.TestCase, _DynamicLinearRegressionStateSpaceModelTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/fitting_test.py b/tensorflow_probability/python/sts/fitting_test.py index 9323ad6c26..8a98db8e42 100644 --- a/tensorflow_probability/python/sts/fitting_test.py +++ b/tensorflow_probability/python/sts/fitting_test.py @@ -22,9 +22,8 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -tfd = tfp.distributions +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top -tfl = tf.linalg class _VariationalInferenceTests(object): @@ -137,14 +136,14 @@ def _build_tensor(self, ndarray, dtype=None): @test_util.run_all_in_graph_and_eager_modes -class VariationalInferenceTestsStatic64(tf.test.TestCase, +class VariationalInferenceTestsStatic64(test_case.TestCase, _VariationalInferenceTests): dtype = np.float64 use_static_shape = True # This test runs in graph mode only to reduce test weight. -class VariationalInferenceTestsDynamic32(tf.test.TestCase, +class VariationalInferenceTestsDynamic32(test_case.TestCase, _VariationalInferenceTests): dtype = np.float32 use_static_shape = False @@ -274,7 +273,7 @@ def _build_tensor(self, ndarray, dtype=None): @test_util.run_all_in_graph_and_eager_modes -class HMCTestsStatic32(tf.test.TestCase, parameterized.TestCase, _HMCTests): +class HMCTestsStatic32(test_case.TestCase, parameterized.TestCase, _HMCTests): dtype = np.float32 use_static_shape = True @@ -311,13 +310,13 @@ def test_chain_batch_shape(self, shape_in, expected_batch_shape_out): # This test runs in graph mode only to reduce test weight. -class HMCTestsDynamic32(tf.test.TestCase, _HMCTests): +class HMCTestsDynamic32(test_case.TestCase, _HMCTests): dtype = np.float32 use_static_shape = False # This test runs in graph mode only to reduce test weight. 
-class HMCTestsStatic64(tf.test.TestCase, _HMCTests): +class HMCTestsStatic64(test_case.TestCase, _HMCTests): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/forecast_test.py b/tensorflow_probability/python/sts/forecast_test.py index 9c4bcc6882..eba9a68d33 100644 --- a/tensorflow_probability/python/sts/forecast_test.py +++ b/tensorflow_probability/python/sts/forecast_test.py @@ -15,15 +15,18 @@ """Tests for STS forecasting methods.""" # Dependency imports + import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util from tensorflow.python.platform import test tfl = tf.linalg -tfd = tfp.distributions class _ForecastTest(object): @@ -99,7 +102,7 @@ def test_one_step_predictive_with_batch_shape(self): onestep_dist = tfp.sts.one_step_predictive(model, observed_time_series, parameter_samples=prior_samples) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) if self.use_static_shape: self.assertAllEqual(onestep_dist.batch_shape.as_list(), batch_shape) else: @@ -173,7 +176,7 @@ def test_forecast_from_hmc(self): sample_shape = [10] forecast_samples = forecast_dist.sample(sample_shape)[..., 0] - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) forecast_mean_, forecast_scale_, forecast_samples_ = self.evaluate( (forecast_mean, forecast_scale, forecast_samples)) self.assertAllEqual(forecast_mean_.shape, @@ -203,7 +206,7 @@ def test_forecast_with_batch_shape(self): parameter_samples=prior_samples, num_steps_forecast=num_steps_forecast) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) if self.use_static_shape: self.assertAllEqual(forecast_dist.batch_shape.as_list(), batch_shape) else: @@ -313,24 +316,24 @@ def _build_tensor(self, ndarray, dtype=None): """ ndarray = np.asarray(ndarray).astype(self.dtype if dtype is None else dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes -class ForecastTestStatic32(tf.test.TestCase, _ForecastTest): +class ForecastTestStatic32(test_case.TestCase, _ForecastTest): dtype = np.float32 use_static_shape = True # Run in graph mode only to reduce test weight. -class ForecastTestDynamic32(tf.test.TestCase, _ForecastTest): +class ForecastTestDynamic32(test_case.TestCase, _ForecastTest): dtype = np.float32 use_static_shape = False # Run in graph mode only to reduce test weight. 
-class ForecastTestStatic64(tf.test.TestCase, _ForecastTest): +class ForecastTestStatic64(test_case.TestCase, _ForecastTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/internal/BUILD b/tensorflow_probability/python/sts/internal/BUILD index a63ce21183..bd0932aa1f 100644 --- a/tensorflow_probability/python/sts/internal/BUILD +++ b/tensorflow_probability/python/sts/internal/BUILD @@ -55,6 +55,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -79,5 +80,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/sts/internal/missing_values_util.py b/tensorflow_probability/python/sts/internal/missing_values_util.py index 38fa1c0884..4ac455ed55 100644 --- a/tensorflow_probability/python/sts/internal/missing_values_util.py +++ b/tensorflow_probability/python/sts/internal/missing_values_util.py @@ -20,7 +20,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf tfl = tf.linalg @@ -130,13 +131,13 @@ def moments_of_masked_time_series(time_series_tensor, broadcast_mask): # Manually compute mean and variance, excluding masked entries. mean = ( tf.reduce_sum( - input_tensor=tf.compat.v1.where(broadcast_mask, + input_tensor=tf1.where(broadcast_mask, tf.zeros_like(time_series_tensor), time_series_tensor), axis=-1) / num_unmasked_entries) variance = ( tf.reduce_sum( - input_tensor=tf.compat.v1.where( + input_tensor=tf1.where( broadcast_mask, tf.zeros_like(time_series_tensor), (time_series_tensor - mean[..., tf.newaxis])**2), axis=-1) / num_unmasked_entries) diff --git a/tensorflow_probability/python/sts/internal/missing_values_util_test.py b/tensorflow_probability/python/sts/internal/missing_values_util_test.py index c56887710e..fe52903b08 100644 --- a/tensorflow_probability/python/sts/internal/missing_values_util_test.py +++ b/tensorflow_probability/python/sts/internal/missing_values_util_test.py @@ -19,14 +19,15 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts.internal import missing_values_util - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -class _MissingValuesUtilityTests(tf.test.TestCase): +class _MissingValuesUtilityTests(test_case.TestCase): def testMoments(self): series = np.random.randn(2, 4) @@ -76,7 +77,7 @@ def _build_tensor(self, ndarray, dtype=None): """ ndarray = np.asarray(ndarray).astype(self.dtype if dtype is None else dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/internal/util_test.py b/tensorflow_probability/python/sts/internal/util_test.py index 2416b2dbdd..4f80796fbd 100644 --- a/tensorflow_probability/python/sts/internal/util_test.py +++ b/tensorflow_probability/python/sts/internal/util_test.py @@ -19,18 +19,19 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability import 
distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts.internal import missing_values_util from tensorflow_probability.python.sts.internal import util as sts_util - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class MultivariateNormalUtilsTest(tf.test.TestCase): +class MultivariateNormalUtilsTest(test_case.TestCase): def test_factored_joint_mvn_diag_full(self): batch_shape = [3, 2] @@ -144,7 +145,7 @@ def test_sum_mvns_broadcast_batch_shape(self): mvn3.covariance())) -class _UtilityTests(tf.test.TestCase): +class _UtilityTests(test_case.TestCase): def test_broadcast_batch_shape(self): batch_shapes = ([2], [3, 2], [1, 2]) @@ -289,7 +290,7 @@ def _build_tensor(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/local_level.py b/tensorflow_probability/python/sts/local_level.py index d05f5c7e31..773df1eb2a 100644 --- a/tensorflow_probability/python/sts/local_level.py +++ b/tensorflow_probability/python/sts/local_level.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -144,7 +145,7 @@ def __init__(self, Default value: "LocalLevelStateSpaceModel". """ - with tf.compat.v1.name_scope(name, 'LocalLevelStateSpaceModel', + with tf1.name_scope(name, 'LocalLevelStateSpaceModel', [level_scale]) as name: # The initial state prior determines the dtype of sampled values. @@ -234,7 +235,7 @@ def __init__(self, Default value: 'LocalLevel'. 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'LocalLevel', values=[observed_time_series]) as name: dtype = dtype_util.common_dtype([level_scale_prior, initial_level_prior]) diff --git a/tensorflow_probability/python/sts/local_level_test.py b/tensorflow_probability/python/sts/local_level_test.py index 84f4fb9278..2bd7713d8e 100644 --- a/tensorflow_probability/python/sts/local_level_test.py +++ b/tensorflow_probability/python/sts/local_level_test.py @@ -19,13 +19,16 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import LocalLevelStateSpaceModel -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + tfl = tf.linalg @@ -96,27 +99,27 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes class LocalLevelStateSpaceModelTestStaticShape32( - tf.test.TestCase, _LocalLevelStateSpaceModelTest): + test_case.TestCase, _LocalLevelStateSpaceModelTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class LocalLevelStateSpaceModelTestDynamicShape32( - tf.test.TestCase, _LocalLevelStateSpaceModelTest): + test_case.TestCase, _LocalLevelStateSpaceModelTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class LocalLevelStateSpaceModelTestStaticShape64( - tf.test.TestCase, _LocalLevelStateSpaceModelTest): + test_case.TestCase, _LocalLevelStateSpaceModelTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/local_linear_trend.py b/tensorflow_probability/python/sts/local_linear_trend.py index 6b63c28e5e..e63a389291 100644 --- a/tensorflow_probability/python/sts/local_linear_trend.py +++ b/tensorflow_probability/python/sts/local_linear_trend.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -156,7 +157,7 @@ def __init__(self, Default value: "LocalLinearTrendStateSpaceModel". """ - with tf.compat.v1.name_scope(name, 'LocalLinearTrendStateSpaceModel', + with tf1.name_scope(name, 'LocalLinearTrendStateSpaceModel', [level_scale, slope_scale]) as name: # The initial state prior determines the dtype of sampled values. @@ -282,7 +283,7 @@ def __init__(self, Default value: 'LocalLinearTrend'. 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'LocalLinearTrend', values=[observed_time_series]) as name: _, observed_stddev, observed_initial = ( diff --git a/tensorflow_probability/python/sts/local_linear_trend_test.py b/tensorflow_probability/python/sts/local_linear_trend_test.py index fcc5f7ace8..c6bf017deb 100644 --- a/tensorflow_probability/python/sts/local_linear_trend_test.py +++ b/tensorflow_probability/python/sts/local_linear_trend_test.py @@ -19,13 +19,16 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + tfl = tf.linalg @@ -103,27 +106,27 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes class LocalLinearTrendStateSpaceModelTestStaticShape32( - tf.test.TestCase, _LocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _LocalLinearTrendStateSpaceModelTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class LocalLinearTrendStateSpaceModelTestDynamicShape32( - tf.test.TestCase, _LocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _LocalLinearTrendStateSpaceModelTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class LocalLinearTrendStateSpaceModelTestStaticShape64( - tf.test.TestCase, _LocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _LocalLinearTrendStateSpaceModelTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/regression.py b/tensorflow_probability/python/sts/regression.py index 5967226d30..f9b5084899 100644 --- a/tensorflow_probability/python/sts/regression.py +++ b/tensorflow_probability/python/sts/regression.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -179,7 +180,7 @@ def __init__(self, name: the name of this model component. Default value: 'LinearRegression'. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'LinearRegression', values=[design_matrix]) as name: if not isinstance(design_matrix, tfl.LinearOperator): @@ -406,7 +407,7 @@ def __init__(self, name: the name of this model component. Default value: 'SparseLinearRegression'. 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'SparseLinearRegression', values=[design_matrix, weights_prior_scale]) as name: diff --git a/tensorflow_probability/python/sts/regression_test.py b/tensorflow_probability/python/sts/regression_test.py index 76881abfa7..9bfb25e0e2 100644 --- a/tensorflow_probability/python/sts/regression_test.py +++ b/tensorflow_probability/python/sts/regression_test.py @@ -19,9 +19,12 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import LinearRegression from tensorflow_probability.python.sts import SparseLinearRegression from tensorflow_probability.python.sts import Sum @@ -30,10 +33,9 @@ from tensorflow.python.platform import test tfl = tf.linalg -tfd = tfp.distributions -class _LinearRegressionTest(tf.test.TestCase): +class _LinearRegressionTest(test_case.TestCase): @test_util.run_in_graph_and_eager_modes def test_basic_statistics(self): @@ -82,7 +84,7 @@ def test_simple_regression_correctness(self): model = Sum(components=[linear_regression], observation_noise_scale_prior=observation_noise_scale_prior) - learnable_weights = tf.compat.v2.Variable( + learnable_weights = tf.Variable( tf.zeros([num_features], dtype=true_weights.dtype)) def build_loss(): @@ -95,13 +97,13 @@ def build_loss(): # We provide graph- and eager-mode optimization for TF 2.0 compatibility. num_train_steps = 80 - optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.1) + optimizer = tf1.train.AdamOptimizer(learning_rate=0.1) if tf.executing_eagerly(): for _ in range(num_train_steps): optimizer.minimize(build_loss) else: train_op = optimizer.minimize(build_loss()) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) for _ in range(num_train_steps): _ = self.evaluate(train_op) self.assertAllClose(*self.evaluate((true_weights, learnable_weights)), @@ -150,11 +152,11 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) -class _SparseLinearRegressionTest(tf.test.TestCase): +class _SparseLinearRegressionTest(test_case.TestCase): @test_util.run_in_graph_and_eager_modes def test_builds_without_errors(self): @@ -166,7 +168,7 @@ def test_builds_without_errors(self): weights_batch_shape = [] if not self.use_static_shape: - weights_batch_shape = tf.compat.v1.placeholder_with_default( + weights_batch_shape = tf1.placeholder_with_default( np.array(weights_batch_shape, dtype=np.int32), shape=None) sparse_regression = SparseLinearRegression( design_matrix=design_matrix, @@ -196,7 +198,7 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/seasonal.py b/tensorflow_probability/python/sts/seasonal.py index 05d3ccf8df..ec5eae5d19 100644 --- a/tensorflow_probability/python/sts/seasonal.py +++ b/tensorflow_probability/python/sts/seasonal.py @@ -19,7 +19,8 @@ 
# Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -208,7 +209,7 @@ def __init__(self, {seasonal_init_args} """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'SeasonalStateSpaceModel', values=[drift_scale, observation_noise_scale]) as name: @@ -423,7 +424,7 @@ def __init__(self, {seasonal_init_args} """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'ConstrainedSeasonalStateSpaceModel', values=[drift_scale, observation_noise_scale]) as name: @@ -575,7 +576,7 @@ def build_seasonal_transition_matrix( basis_change_matrix=None, basis_change_matrix_inv=None): """Build a function computing transitions for a seasonal effect model.""" - with tf.compat.v1.name_scope('build_seasonal_transition_matrix'): + with tf1.name_scope('build_seasonal_transition_matrix'): # If the season is changing, the transition matrix permutes the latent # state to shift all seasons up by a dimension, and sends the current # season's effect to the bottom. @@ -823,7 +824,7 @@ def __init__(self, Default value: 'Seasonal'. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'Seasonal', values=[observed_time_series]) as name: _, observed_stddev, observed_initial = ( diff --git a/tensorflow_probability/python/sts/seasonal_test.py b/tensorflow_probability/python/sts/seasonal_test.py index d7b655ca77..3b1eff3389 100644 --- a/tensorflow_probability/python/sts/seasonal_test.py +++ b/tensorflow_probability/python/sts/seasonal_test.py @@ -19,19 +19,21 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import ConstrainedSeasonalStateSpaceModel from tensorflow_probability.python.sts import SeasonalStateSpaceModel -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + tfl = tf.linalg -class _SeasonalStateSpaceModelTest(tf.test.TestCase): +class _SeasonalStateSpaceModelTest(test_case.TestCase): def test_day_of_week_example(self): @@ -283,7 +285,7 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @@ -305,7 +307,7 @@ class SeasonalStateSpaceModelTestStaticShape64(_SeasonalStateSpaceModelTest): use_static_shape = True -class _ConstrainedSeasonalStateSpaceModelTest(tf.test.TestCase): +class _ConstrainedSeasonalStateSpaceModelTest(test_case.TestCase): # TODO(b/128635942): write additional tests for ConstrainedSeasonalSSM @@ -377,7 +379,7 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) diff --git a/tensorflow_probability/python/sts/semilocal_linear_trend.py b/tensorflow_probability/python/sts/semilocal_linear_trend.py index 6205cd6fd7..fbad0e2728 100644 --- 
a/tensorflow_probability/python/sts/semilocal_linear_trend.py +++ b/tensorflow_probability/python/sts/semilocal_linear_trend.py @@ -18,7 +18,8 @@ from __future__ import print_function # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -174,7 +175,7 @@ def __init__(self, Default value: "SemiLocalLinearTrendStateSpaceModel". """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'SemiLocalLinearTrendStateSpaceModel', values=[level_scale, slope_mean, slope_scale, @@ -385,7 +386,7 @@ def __init__(self, Default value: 'SemiLocalLinearTrend'. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'SemiLocalLinearTrend', values=[observed_time_series]) as name: if observed_time_series is not None: diff --git a/tensorflow_probability/python/sts/semilocal_linear_trend_test.py b/tensorflow_probability/python/sts/semilocal_linear_trend_test.py index 0ff2fc5bd3..93ca868318 100644 --- a/tensorflow_probability/python/sts/semilocal_linear_trend_test.py +++ b/tensorflow_probability/python/sts/semilocal_linear_trend_test.py @@ -19,13 +19,15 @@ from __future__ import print_function # Dependency imports + import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel from tensorflow_probability.python.sts import SemiLocalLinearTrendStateSpaceModel -tfd = tfp.distributions from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -180,27 +182,27 @@ def _build_placeholder(self, ndarray): """ ndarray = np.asarray(ndarray).astype(self.dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes class SemiLocalLinearTrendStateSpaceModelTestStaticShape32( - tf.test.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class SemiLocalLinearTrendStateSpaceModelTestDynamicShape32( - tf.test.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class SemiLocalLinearTrendStateSpaceModelTestStaticShape64( - tf.test.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): + test_case.TestCase, _SemiLocalLinearTrendStateSpaceModelTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/smooth_seasonal_test.py b/tensorflow_probability/python/sts/smooth_seasonal_test.py index 963419ed4b..24e48166f1 100644 --- a/tensorflow_probability/python/sts/smooth_seasonal_test.py +++ b/tensorflow_probability/python/sts/smooth_seasonal_test.py @@ -19,17 +19,17 @@ from __future__ import print_function # Dependency imports + import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf -import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as 
tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import SmoothSeasonal from tensorflow_probability.python.sts import SmoothSeasonalStateSpaceModel from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - class _SmoothSeasonalStateSpaceModelTest(object): @@ -142,21 +142,21 @@ def _build_placeholder(self, ndarray): @test_util.run_all_in_graph_and_eager_modes class SmoothSeasonalStateSpaceModelTestStaticShape32( - tf.test.TestCase, _SmoothSeasonalStateSpaceModelTest): + test_case.TestCase, _SmoothSeasonalStateSpaceModelTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class SmoothSeasonalStateSpaceModelTestDynamicShape32( - tf.test.TestCase, _SmoothSeasonalStateSpaceModelTest): + test_case.TestCase, _SmoothSeasonalStateSpaceModelTest): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class SmoothSeasonalStateSpaceModelTestStaticShape64( - tf.test.TestCase, _SmoothSeasonalStateSpaceModelTest): + test_case.TestCase, _SmoothSeasonalStateSpaceModelTest): dtype = np.float64 use_static_shape = True diff --git a/tensorflow_probability/python/sts/structural_time_series.py b/tensorflow_probability/python/sts/structural_time_series.py index 43b9b724fe..8b8dcaa3d0 100644 --- a/tensorflow_probability/python/sts/structural_time_series.py +++ b/tensorflow_probability/python/sts/structural_time_series.py @@ -20,7 +20,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions from tensorflow_probability.python.internal import distribution_util @@ -195,7 +196,7 @@ def prior_sample(self, seed = distributions.SeedStream( seed, salt='StructuralTimeSeries_prior_sample') - with tf.compat.v1.name_scope( + with tf1.name_scope( 'prior_sample', values=[num_timesteps, params_sample_shape, trajectories_sample_shape]): param_samples = [ @@ -232,7 +233,7 @@ def joint_log_prob(self, observed_time_series): inference. 
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( 'joint_log_prob', values=[observed_time_series]): [ observed_time_series, diff --git a/tensorflow_probability/python/sts/structural_time_series_test.py b/tensorflow_probability/python/sts/structural_time_series_test.py index 5304471630..d4cc1f6027 100644 --- a/tensorflow_probability/python/sts/structural_time_series_test.py +++ b/tensorflow_probability/python/sts/structural_time_series_test.py @@ -19,11 +19,13 @@ from __future__ import print_function import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp - +from tensorflow_probability.python import bijectors as tfb +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util - from tensorflow_probability.python.sts import Autoregressive from tensorflow_probability.python.sts import DynamicLinearRegression from tensorflow_probability.python.sts import LinearRegression @@ -36,8 +38,6 @@ from tensorflow_probability.python.sts import Sum from tensorflow_probability.python.sts.internal import util as sts_util -tfd = tfp.distributions -tfb = tfp.bijectors from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @@ -103,27 +103,27 @@ def _build_placeholder(self, ndarray, dtype=None): dtype = self.dtype ndarray = np.asarray(ndarray).astype(dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) @test_util.run_all_in_graph_and_eager_modes class StructuralTimeSeriesTestsStaticShape32( - _StructuralTimeSeriesTests, tf.test.TestCase): + _StructuralTimeSeriesTests, test_case.TestCase): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class StructuralTimeSeriesTestsDynamicShape32( - _StructuralTimeSeriesTests, tf.test.TestCase): + _StructuralTimeSeriesTests, test_case.TestCase): dtype = np.float32 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class StructuralTimeSeriesTestsStaticShape64( - _StructuralTimeSeriesTests, tf.test.TestCase): + _StructuralTimeSeriesTests, test_case.TestCase): dtype = np.float64 use_static_shape = True @@ -256,28 +256,28 @@ def test_default_priors_follow_batch_shapes(self): @test_util.run_all_in_graph_and_eager_modes -class AutoregressiveTest(tf.test.TestCase, _StsTestHarness): +class AutoregressiveTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return Autoregressive(order=3, observed_time_series=observed_time_series) @test_util.run_all_in_graph_and_eager_modes -class LocalLevelTest(tf.test.TestCase, _StsTestHarness): +class LocalLevelTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return LocalLevel(observed_time_series=observed_time_series) @test_util.run_all_in_graph_and_eager_modes -class LocalLinearTrendTest(tf.test.TestCase, _StsTestHarness): +class LocalLinearTrendTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return LocalLinearTrend(observed_time_series=observed_time_series) @test_util.run_all_in_graph_and_eager_modes -class SeasonalTest(tf.test.TestCase, _StsTestHarness): +class SeasonalTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, 
observed_time_series=None): # Note that a Seasonal model with `num_steps_per_season > 1` would have @@ -294,7 +294,7 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class SeasonalWithZeroMeanConstraintTest(tf.test.TestCase, _StsTestHarness): +class SeasonalWithZeroMeanConstraintTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return Seasonal(num_seasons=7, @@ -304,7 +304,8 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class SeasonalWithMultipleStepsAndNoiseTest(tf.test.TestCase, _StsTestHarness): +class SeasonalWithMultipleStepsAndNoiseTest(test_case.TestCase, + _StsTestHarness): def _build_sts(self, observed_time_series=None): day_of_week = tfp.sts.Seasonal(num_seasons=7, @@ -316,14 +317,14 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class SemiLocalLinearTrendTest(tf.test.TestCase, _StsTestHarness): +class SemiLocalLinearTrendTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return SemiLocalLinearTrend(observed_time_series=observed_time_series) @test_util.run_all_in_graph_and_eager_modes -class SmoothSeasonalTest(tf.test.TestCase, _StsTestHarness): +class SmoothSeasonalTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): return SmoothSeasonal(period=42, @@ -332,7 +333,7 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class SumTest(tf.test.TestCase, _StsTestHarness): +class SumTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): first_component = LocalLinearTrend( @@ -345,7 +346,7 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class LinearRegressionTest(tf.test.TestCase, _StsTestHarness): +class LinearRegressionTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): max_timesteps = 100 @@ -374,7 +375,7 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class SparseLinearRegressionTest(tf.test.TestCase, _StsTestHarness): +class SparseLinearRegressionTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): max_timesteps = 100 @@ -399,7 +400,7 @@ def _build_sts(self, observed_time_series=None): @test_util.run_all_in_graph_and_eager_modes -class DynamicLinearRegressionTest(tf.test.TestCase, _StsTestHarness): +class DynamicLinearRegressionTest(test_case.TestCase, _StsTestHarness): def _build_sts(self, observed_time_series=None): max_timesteps = 100 diff --git a/tensorflow_probability/python/sts/sum.py b/tensorflow_probability/python/sts/sum.py index 1033571678..f130fb95de 100644 --- a/tensorflow_probability/python/sts/sum.py +++ b/tensorflow_probability/python/sts/sum.py @@ -21,7 +21,8 @@ import collections # Dependency imports -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd @@ -213,7 +214,7 @@ def __init__(self, ValueError: if components have different `num_timesteps`. 
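# The test files in this diff all share the harness shape sketched below
# (`_HarnessMixin` and the concrete class name are illustrative). Shared
# assertions live on a plain mixin, each concrete class pins one
# (dtype, shape-staticness) cell of the test matrix, and the base class
# switches from tf.test.TestCase to TFP's internal test_case.TestCase.
import numpy as np
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.internal import test_case

class _HarnessMixin(object):

  def _build_placeholder(self, ndarray):
    # shape=None erases static shape info, exercising dynamic-shape paths.
    ndarray = np.asarray(ndarray).astype(self.dtype)
    return tf1.placeholder_with_default(
        input=ndarray,
        shape=ndarray.shape if self.use_static_shape else None)

class HarnessDynamicShape32(test_case.TestCase, _HarnessMixin):
  dtype = np.float32
  use_static_shape = False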
""" - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'AdditiveStateSpaceModel', values=[observation_noise_scale, initial_step]) as name: @@ -252,7 +253,7 @@ def __init__(self, num_timesteps = component_ssms[0].num_timesteps if validate_args and len(static_num_timesteps) != len(component_ssms): assertions += [ - tf.compat.v1.assert_equal( + tf1.assert_equal( num_timesteps, ssm.num_timesteps, message='Additive model components must all have ' @@ -413,7 +414,7 @@ def __init__(self, ValueError: if components do not have unique names. """ - with tf.compat.v1.name_scope( + with tf1.name_scope( name, 'Sum', values=[observed_time_series]) as name: if observed_time_series is not None: observed_mean, observed_stddev, _ = ( @@ -494,7 +495,7 @@ def make_component_state_space_models(self, Distribution objects, in order corresponding to `self.components`. """ - with tf.compat.v1.name_scope('make_component_state_space_models'): + with tf1.name_scope('make_component_state_space_models'): # List the model parameters in canonical order param_map = self._canonicalize_param_vals_as_map(param_vals) diff --git a/tensorflow_probability/python/sts/sum_test.py b/tensorflow_probability/python/sts/sum_test.py index 165ff9d248..39fcd7bee4 100644 --- a/tensorflow_probability/python/sts/sum_test.py +++ b/tensorflow_probability/python/sts/sum_test.py @@ -20,18 +20,20 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability import distributions as tfd - +from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.sts import AdditiveStateSpaceModel from tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel - from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + + tfl = tf.linalg -class _AdditiveStateSpaceModelTest(tf.test.TestCase): +class _AdditiveStateSpaceModelTest(test_case.TestCase): def test_identity(self): @@ -384,7 +386,7 @@ def _build_placeholder(self, ndarray, dtype=None): """ dtype = dtype if dtype is not None else self.dtype ndarray = np.asarray(ndarray).astype(dtype) - return tf.compat.v1.placeholder_with_default( + return tf1.placeholder_with_default( input=ndarray, shape=ndarray.shape if self.use_static_shape else None) def _dummy_model(self, @@ -434,7 +436,7 @@ def test_dynamic_num_timesteps(self): # (not necessarily the first) has static num_timesteps. 
num_timesteps = 4 dynamic_timesteps_component = self._dummy_model( - num_timesteps=tf.compat.v1.placeholder_with_default( + num_timesteps=tf1.placeholder_with_default( input=num_timesteps, shape=None)) static_timesteps_component = self._dummy_model( num_timesteps=num_timesteps) diff --git a/tensorflow_probability/python/trainable_distributions/BUILD b/tensorflow_probability/python/trainable_distributions/BUILD index 3522e5642c..be73194ad3 100644 --- a/tensorflow_probability/python/trainable_distributions/BUILD +++ b/tensorflow_probability/python/trainable_distributions/BUILD @@ -54,5 +54,6 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py b/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py index 101256aed5..ea175afb69 100644 --- a/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py +++ b/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py @@ -24,7 +24,8 @@ from __future__ import division from __future__ import print_function -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python import math as tfp_math @@ -60,7 +61,7 @@ def softplus_and_shift(x, shift=1e-5, name=None): Returns: scale: (Batch of) scalars`with `x.dtype` and `x.shape`. """ - with tf.compat.v1.name_scope(name, 'softplus_and_shift', [x, shift]): + with tf1.name_scope(name, 'softplus_and_shift', [x, shift]): x = tf.convert_to_tensor(value=x, name='x') y = tf.nn.softplus(x) if shift is not None: @@ -92,7 +93,7 @@ def tril_with_diag_softplus_and_shift(x, diag_shift=1e-5, name=None): rightmost shape `[dims, dims]` where `n = dims * (dims + 1) / 2` where `n = x.shape[-1]`. """ - with tf.compat.v1.name_scope(name, 'tril_with_diag_softplus_and_shift', + with tf1.name_scope(name, 'tril_with_diag_softplus_and_shift', [x, diag_shift]): x = tf.convert_to_tensor(value=x, name='x') x = tfp_math.fill_triangular(x) @@ -108,7 +109,7 @@ def tril_with_diag_softplus_and_shift(x, diag_shift=1e-5, name=None): warn_once=True) def multivariate_normal_tril(x, dims, - layer_fn=tf.compat.v1.layers.dense, + layer_fn=tf1.layers.dense, loc_fn=lambda x: x, scale_fn=tril_with_diag_softplus_and_shift, name=None): @@ -206,7 +207,7 @@ def make_training_data(): Returns: mvntril: An instance of `tfd.MultivariateNormalTriL`. """ - with tf.compat.v1.name_scope(name, 'multivariate_normal_tril', [x, dims]): + with tf1.name_scope(name, 'multivariate_normal_tril', [x, dims]): x = tf.convert_to_tensor(value=x, name='x') x = layer_fn(x, dims + dims * (dims + 1) // 2) return tfd.MultivariateNormalTriL( @@ -219,7 +220,7 @@ def make_training_data(): '`multivariate_normal_tril` is deprecated; ' 'use `tfp.layers.DistributionLambda` or `tfp.util.DeferredTensor`.', warn_once=True) -def bernoulli(x, layer_fn=tf.compat.v1.layers.dense, name=None): +def bernoulli(x, layer_fn=tf1.layers.dense, name=None): """Constructs a trainable `tfd.Bernoulli` distribution. This function creates a Bernoulli distribution parameterized by logits. @@ -297,7 +298,7 @@ def make_training_data(): Returns: bernoulli: An instance of `tfd.Bernoulli`. 
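# After this migration the deprecated bernoulli helper reduces to the few
# lines below, a sketch mirroring the hunk that follows (`bernoulli_head`
# is an illustrative name). tf1.layers.dense is kept deliberately: per
# the deprecation notices in this file, the forward-looking replacement
# is tfp.layers.DistributionLambda, not a v2 rewrite of this helper.
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd

def bernoulli_head(x):
  # One logit per example, squeezed into a trainable Bernoulli.
  x = tf.convert_to_tensor(value=x, name='x')
  logits = tf.squeeze(tf1.layers.dense(x, 1), axis=-1)
  return tfd.Bernoulli(logits=logits)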
""" - with tf.compat.v1.name_scope(name, 'bernoulli', [x]): + with tf1.name_scope(name, 'bernoulli', [x]): x = tf.convert_to_tensor(value=x, name='x') logits = tf.squeeze(layer_fn(x, 1), axis=-1) return tfd.Bernoulli(logits=logits) @@ -309,7 +310,7 @@ def make_training_data(): 'use `tfp.layers.DistributionLambda` or `tfp.util.DeferredTensor`.', warn_once=True) def normal(x, - layer_fn=tf.compat.v1.layers.dense, + layer_fn=tf1.layers.dense, loc_fn=lambda x: x, scale_fn=1., name=None): @@ -401,7 +402,7 @@ def make_training_data(): Returns: normal: An instance of `tfd.Normal`. """ - with tf.compat.v1.name_scope(name, 'normal', [x]): + with tf1.name_scope(name, 'normal', [x]): x = tf.convert_to_tensor(value=x, name='x') if callable(scale_fn): y = layer_fn(x, 2) @@ -420,7 +421,7 @@ def make_training_data(): 'use `tfp.layers.DistributionLambda` or `tfp.util.DeferredTensor`.', warn_once=True) def poisson(x, - layer_fn=tf.compat.v1.layers.dense, + layer_fn=tf1.layers.dense, log_rate_fn=lambda x: x, name=None): """Constructs a trainable `tfd.Poisson` distribution. @@ -504,7 +505,7 @@ def make_training_data(): Returns: poisson: An instance of `tfd.Poisson`. """ - with tf.compat.v1.name_scope(name, 'poisson', [x]): + with tf1.name_scope(name, 'poisson', [x]): x = tf.convert_to_tensor(value=x, name='x') log_rate = log_rate_fn(tf.squeeze(layer_fn(x, 1), axis=-1)) return tfd.Poisson(log_rate=log_rate) diff --git a/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib_test.py b/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib_test.py index cec220002c..879f75e09a 100644 --- a/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib_test.py +++ b/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib_test.py @@ -19,17 +19,18 @@ from __future__ import print_function # Dependency imports -import numpy as np -import tensorflow as tf +import numpy as np +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp -from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import +from tensorflow_probability.python.internal import test_case -tfd = tfp.distributions +from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes -class TestMVNTriL(tf.test.TestCase): +class TestMVNTriL(test_case.TestCase): def setUp(self): np.random.seed(142) @@ -48,7 +49,7 @@ def testDefaultsYieldCorrectShapesAndValues(self): tf.zeros(np.concatenate([batch_shape, [mvn_size]]), scale.dtype)) scale_diag = tf.linalg.diag_part(scale) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) [ batch_shape_, event_shape_, @@ -90,7 +91,7 @@ def testNonDefaultsYieldCorrectShapesAndValues(self): num_lower=-1, num_upper=0) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) [ batch_shape_, event_shape_, @@ -116,7 +117,7 @@ def testNonDefaultsYieldCorrectShapesAndValues(self): @test_util.run_all_in_graph_and_eager_modes -class TestBernoulli(tf.test.TestCase): +class TestBernoulli(test_case.TestCase): def setUp(self): np.random.seed(142) @@ -129,7 +130,7 @@ def testDefaultsYieldCorrectShape(self): x = tf.constant(x_) bernoulli = tfp.trainable_distributions.bernoulli(x) - self.evaluate(tf.compat.v1.global_variables_initializer()) + 
self.evaluate(tf1.global_variables_initializer()) [ batch_shape_, event_shape_, @@ -175,7 +176,7 @@ def testNonDefaultsYieldCorrectShapeAndValues(self): @test_util.run_all_in_graph_and_eager_modes -class TestNormal(tf.test.TestCase): +class TestNormal(test_case.TestCase): def setUp(self): np.random.seed(142) @@ -188,7 +189,7 @@ def testDefaultsYieldCorrectShape(self): x = tf.constant(x_) normal = tfp.trainable_distributions.normal(x) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) [ batch_shape_, event_shape_, @@ -234,7 +235,7 @@ def testNonDefaultsYieldCorrectShapeAndValues(self): @test_util.run_all_in_graph_and_eager_modes -class TestPoisson(tf.test.TestCase): +class TestPoisson(test_case.TestCase): def setUp(self): np.random.seed(142) @@ -247,7 +248,7 @@ def testDefaultsYieldCorrectShape(self): x = tf.constant(x_) poisson = tfp.trainable_distributions.poisson(x) - self.evaluate(tf.compat.v1.global_variables_initializer()) + self.evaluate(tf1.global_variables_initializer()) [ batch_shape_, event_shape_, @@ -293,7 +294,7 @@ def testNonDefaultsYieldCorrectShapeAndValues(self): @test_util.run_all_in_graph_and_eager_modes -class TestMakePositiveFunctions(tf.test.TestCase): +class TestMakePositiveFunctions(test_case.TestCase): def softplus(self, x): return np.log1p(np.exp(x)) diff --git a/tensorflow_probability/python/util/BUILD b/tensorflow_probability/python/util/BUILD index de65a02895..06c9ef7f52 100644 --- a/tensorflow_probability/python/util/BUILD +++ b/tensorflow_probability/python/util/BUILD @@ -58,6 +58,7 @@ py_test( # numpy dep, # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) @@ -73,5 +74,6 @@ py_test( deps = [ # tensorflow dep, "//tensorflow_probability", + "//tensorflow_probability/python/internal:test_case", ], ) diff --git a/tensorflow_probability/python/util/deferred_tensor_test.py b/tensorflow_probability/python/util/deferred_tensor_test.py index 45aa7eee2f..7885b46ee0 100644 --- a/tensorflow_probability/python/util/deferred_tensor_test.py +++ b/tensorflow_probability/python/util/deferred_tensor_test.py @@ -22,18 +22,17 @@ from absl.testing import parameterized import numpy as np - import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python import distributions as tfd +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import -tfd = tfp.distributions - @test_util.run_all_in_graph_and_eager_modes -class DeferredTensorTest(tf.test.TestCase): +class DeferredTensorTest(test_case.TestCase): def test_docstring_example(self): trainable_normal = tfd.Normal( @@ -62,7 +61,7 @@ def test_properties(self): @test_util.run_all_in_graph_and_eager_modes -class DeferredTensorBehavesLikeTensorTest(tf.test.TestCase, +class DeferredTensorBehavesLikeTensorTest(test_case.TestCase, parameterized.TestCase): def testArrayPriority(self): diff --git a/tensorflow_probability/python/util/seed_stream_test.py b/tensorflow_probability/python/util/seed_stream_test.py index 17c7286447..028c450ed2 100644 --- a/tensorflow_probability/python/util/seed_stream_test.py +++ b/tensorflow_probability/python/util/seed_stream_test.py @@ -21,11 +21,12 @@ import tensorflow.compat.v2 as tf import tensorflow_probability as tfp +from tensorflow_probability.python.internal import test_case from tensorflow.python.framework import 
test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top @test_util.run_all_in_graph_and_eager_modes -class SeedStreamTest(tf.test.TestCase): +class SeedStreamTest(test_case.TestCase): def assertAllUnique(self, items): self.assertEqual(len(items), len(set(items))) diff --git a/tensorflow_probability/python/vi/csiszar_divergence_test.py b/tensorflow_probability/python/vi/csiszar_divergence_test.py index 24d7c088bb..0fe7a1a210 100644 --- a/tensorflow_probability/python/vi/csiszar_divergence_test.py +++ b/tensorflow_probability/python/vi/csiszar_divergence_test.py @@ -21,7 +21,8 @@ # Dependency imports import numpy as np -import tensorflow as tf +import tensorflow.compat.v1 as tf1 +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import test_case @@ -811,7 +812,7 @@ def test_vimco_and_gradient(self): # We want the seed to be the same since we will use computations # with the same underlying sample to show correctness of vimco. if tf.executing_eagerly(): - tf.compat.v1.set_random_seed(seed) + tf1.set_random_seed(seed) x = q.sample(sample_shape=[num_draws, num_batch_draws], seed=seed) x = tf.stop_gradient(x) logu = p.log_prob(x) - q.log_prob(x) diff --git a/tensorflow_probability/python/vi/mutual_information_test.py b/tensorflow_probability/python/vi/mutual_information_test.py index a59f45a184..ebb2b1fa6c 100644 --- a/tensorflow_probability/python/vi/mutual_information_test.py +++ b/tensorflow_probability/python/vi/mutual_information_test.py @@ -20,7 +20,7 @@ import numpy as np import scipy -import tensorflow as tf +import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import test_case from tensorflow_probability.python.internal import test_util as tfp_test_util diff --git a/tensorflow_probability/tools/BUILD b/tensorflow_probability/tools/BUILD new file mode 100644 index 0000000000..2e73e10ce9 --- /dev/null +++ b/tensorflow_probability/tools/BUILD @@ -0,0 +1,26 @@ +# Copyright 2019 The TensorFlow Probability Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +licenses(["notice"]) # Apache 2.0 + +py_binary( + name = "build_docs", + srcs = ["build_docs.py"], + python_version = "PY3", # because we want to update __doc__ on instance methods + deps = [ + # jax dep, + # tensorflow_docs/api_generator:generate_lib dep, + "//tensorflow_probability", + ], +) diff --git a/tensorflow_probability/tools/build_docs.py b/tensorflow_probability/tools/build_docs.py new file mode 100644 index 0000000000..340c5c416e --- /dev/null +++ b/tensorflow_probability/tools/build_docs.py @@ -0,0 +1,68 @@ +# Copyright 2018 The TensorFlow Probability Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Tool to generate external api_docs for tensorflow_probability. + +Note: + If duplicate or spurious docs are generated (e.g. internal names), consider + blacklisting them via the `private_map` argument below. +""" + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from absl import app +from absl import flags +from tensorflow_docs.api_generator import generate_lib +import tensorflow_probability as tfp + + +flags.DEFINE_string("output_dir", "/tmp/probability_api", + "Where to output the docs") + +flags.DEFINE_string( + "code_url_prefix", + ("https://github.com/tensorflow/probability/blob/master/" + "tensorflow_probability"), + "The url prefix for links to code.") + +flags.DEFINE_bool("search_hints", True, + "Include metadata search hints in the generated files") + +flags.DEFINE_string("site_path", "probability/api_docs/python", + "Path prefix in the _toc.yaml") + +FLAGS = flags.FLAGS + + +def main(unused_argv): + doc_generator = generate_lib.DocGenerator( + root_title="TensorFlow Probability", + py_modules=[("tfp", tfp)], + base_dir=os.path.dirname(tfp.__file__), + code_url_prefix=FLAGS.code_url_prefix, + search_hints=FLAGS.search_hints, + site_path=FLAGS.site_path, + private_map={"tfp": ["google", "staging", "python"]}, + ) + + doc_generator.build(output_dir=FLAGS.output_dir) + + +if __name__ == "__main__": + app.run(main)
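# A hedged usage sketch for the new build_docs target. Flag names are
# taken from the flags.DEFINE_* calls above; the output path is
# illustrative:
#
#   bazel run //tensorflow_probability/tools:build_docs -- \
#     --output_dir=/tmp/probability_api \
#     --site_path=probability/api_docs/python
#
# absl generates boolean negations automatically, so search-hint metadata
# can be omitted with --nosearch_hints.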