diff --git a/required_packages.py b/required_packages.py
index cefd122969..fbb6305291 100644
--- a/required_packages.py
+++ b/required_packages.py
@@ -26,7 +26,6 @@
     'cloudpickle>=1.3',
    'gast>=0.3.2',  # For autobatching
    'dm-tree',  # For NumPy/JAX backends (hence, also for prefer_static)
-    'typing-extensions<4.6.0',  # TODO(b/284106340): Remove this pin
 ]
 
 if __name__ == '__main__':
diff --git a/tensorflow_probability/python/experimental/linalg/linear_operator_psd_kernel_test.py b/tensorflow_probability/python/experimental/linalg/linear_operator_psd_kernel_test.py
index c3e45be183..9a4aefcf13 100644
--- a/tensorflow_probability/python/experimental/linalg/linear_operator_psd_kernel_test.py
+++ b/tensorflow_probability/python/experimental/linalg/linear_operator_psd_kernel_test.py
@@ -271,15 +271,17 @@ def test_matmul_grad_xla_kernelparams(self):
     feature_dim = 3
 
     def kernel_fn(eq_params, poly_params):
-      return (exponentiated_quadratic.ExponentiatedQuadratic(**eq_params) *
-              polynomial.Polynomial(**poly_params))
+      return (exponentiated_quadratic.ExponentiatedQuadratic(*eq_params) *
+              polynomial.Polynomial(bias_amplitude=poly_params[0],
+                                    shift=poly_params[1]))
 
+    # TODO(b/284106340): Return this to a dictionary.
     kernel_args = (
-        dict(length_scale=tf.random.uniform([], .5, 1.5, dtype=tf.float64),
-             amplitude=tf.random.uniform([], 1.5, 2.5, dtype=tf.float64)),
-        dict(bias_amplitude=tf.random.uniform([feature_dim], .5, 1.5,
-                                              dtype=tf.float64),
-             shift=tf.random.normal([feature_dim], dtype=tf.float64)))
+        (tf.random.uniform([], 1.5, 2.5, dtype=tf.float64),  # amplitude
+         tf.random.uniform([], .5, 1.5, dtype=tf.float64)),  # length_scale
+        (tf.random.uniform([feature_dim], .5, 1.5,  # bias_amplitude
+                           dtype=tf.float64),
+         tf.random.normal([feature_dim], dtype=tf.float64)))  # shift
 
     x1 = tf.random.normal([5, feature_dim], dtype=tf.float64)
     x2 = tf.random.normal([7, feature_dim], dtype=tf.float64)