diff --git a/tensorflow_probability/python/layers/conv_variational.py b/tensorflow_probability/python/layers/conv_variational.py
index 1bdb7b2834..20b30720b4 100644
--- a/tensorflow_probability/python/layers/conv_variational.py
+++ b/tensorflow_probability/python/layers/conv_variational.py
@@ -381,7 +381,7 @@ def _apply_variational_bias(self, inputs):
         # As of Mar 2017, direct addition is significantly slower than
         # bias_add when computing gradients. To use bias_add, we collapse Z
         # and Y into a single dimension to obtain a 4D input tensor.
-        outputs_shape = outputs.shape.as_list()
+        outputs_shape = tf.shape(outputs)
         outputs_4d = tf.reshape(outputs,
                                 [outputs_shape[0], outputs_shape[1],
                                  outputs_shape[2] * outputs_shape[3],
diff --git a/tensorflow_probability/python/layers/conv_variational_test.py b/tensorflow_probability/python/layers/conv_variational_test.py
index e1aaa9afd7..d5f9b77ae3 100644
--- a/tensorflow_probability/python/layers/conv_variational_test.py
+++ b/tensorflow_probability/python/layers/conv_variational_test.py
@@ -609,7 +609,8 @@ def _testLayerInSequential(self, layer_class):  # pylint: disable=invalid-name
     outputs = self.maybe_transpose_tensor(outputs)
 
     net = tf.keras.Sequential([
-        layer_class(filters=2, kernel_size=3, data_format=self.data_format),
+        layer_class(filters=2, kernel_size=3, data_format=self.data_format,
+                    input_shape=inputs.shape[1:]),
         layer_class(filters=2, kernel_size=1, data_format=self.data_format)])
     net.compile(loss='mse', optimizer='adam')
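
Why the first hunk matters, as a minimal standalone sketch (not from the patch itself): when a Keras model is built with an unknown batch dimension, `outputs.shape.as_list()` returns `None` for that axis, and `None` is not a valid size to pass to `tf.reshape`; `tf.shape(outputs)` instead yields a dynamic `int32` tensor whose entries are all concrete at run time. The helper name `collapse_zy` and the `(None, 2, 4, 4, 3)` signature below are illustrative assumptions, not code from the patch.

```python
import tensorflow as tf

# Hypothetical helper mirroring the patched reshape in
# _apply_variational_bias for a 5-D channels-first tensor (N, C, Z, Y, X).
@tf.function(input_signature=[tf.TensorSpec([None, 2, 4, 4, 3])])
def collapse_zy(outputs):
  # Here outputs.shape.as_list() would be [None, 2, 4, 4, 3]; feeding that
  # list to tf.reshape fails because None is not a valid dimension size.
  # tf.shape(outputs) returns a dynamic int32 tensor whose entries are all
  # concrete once the function actually runs.
  outputs_shape = tf.shape(outputs)
  return tf.reshape(outputs,
                    [outputs_shape[0], outputs_shape[1],
                     outputs_shape[2] * outputs_shape[3],
                     outputs_shape[4]])

# Works for any batch size, since no static dimension is ever materialized.
print(collapse_zy(tf.zeros([7, 2, 4, 4, 3])).shape)  # (7, 2, 16, 3)
```

The test-side hunk appears to exist for the same reason: passing `input_shape` to the first layer makes the `Sequential` model build immediately with a symbolic batch dimension of `None`, which is exactly the unknown-shape case the dynamic `tf.shape` fix has to handle.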