From e3a03b4a2d6c97ee58f39da016cee02478464c5e Mon Sep 17 00:00:00 2001
From: jburnim
Date: Wed, 21 Feb 2024 20:58:12 -0800
Subject: [PATCH] Update tf.keras -> tf_keras in example notebooks.

Starting in TF 2.16 (and already in tf-nightly), `tf.keras` will refer to
Keras 3 instead of Keras 2. TFP is only compatible with Keras 2, which can
still be imported as `tf_keras`.

This change also modernizes the TensorFlow imports in these notebooks,
using `import tensorflow as tf` instead of `import tensorflow.compat.v2 as
tf`, and removes the now-redundant `tf.enable_v2_behavior()` calls.
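
For reference, the before/after pattern applied throughout (an
illustrative sketch only -- learning rates and variable names vary by
notebook):

    # Before: Keras 2 reached through the TF compat layer.
    import tensorflow.compat.v2 as tf
    tf.enable_v2_behavior()
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

    # After: Keras 2 imported directly from the standalone package
    # (available on PyPI as `tf-keras`).
    import tensorflow as tf
    import tf_keras
    optimizer = tf_keras.optimizers.Adam(learning_rate=0.01)
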
PiperOrigin-RevId: 609224497
---
 ...ussian_Process_Latent_Variable_Model.ipynb |  8 ++--
 .../Gaussian_Process_Regression_In_TFP.ipynb  |  6 +--
 ..._Effects_Model_Variational_Inference.ipynb |  6 +--
 .../Linear_Mixed_Effects_Models.ipynb         |  3 +-
 .../Modeling_with_JointDistribution.ipynb     | 11 +++--
 .../Multilevel_Modeling_Primer.ipynb          |  9 ++---
 ...tection_and_Bayesian_model_selection.ipynb |  8 ++--
 ...Optimizers_in_TensorFlow_Probability.ipynb |  8 ++--
 .../Probabilistic_Layers_Regression.ipynb     | 40 +++++++++----------
 .../Probabilistic_Layers_VAE.ipynb            | 10 ++---
 .../jupyter_notebooks/Probabilistic_PCA.ipynb |  9 ++---
 ...odels_with_non_Gaussian_observations.ipynb |  9 ++---
 ...al_Inference_and_Joint_Distributions.ipynb |  7 ++--
 13 files changed, 65 insertions(+), 69 deletions(-)

diff --git a/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb
index 8ae554c36d..4accfacf65 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Latent_Variable_Model.ipynb
@@ -200,8 +200,8 @@
    ],
    "source": [
     "import numpy as np\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_probability as tfp\n",
     "tfd = tfp.distributions\n",
     "tfk = tfp.math.psd_kernels\n",
@@ -242,7 +242,7 @@
    ],
    "source": [
     "# Load the MNIST data set and isolate a subset of it.\n",
-    "(x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()\n",
+    "(x_train, y_train), (_, _) = tf_keras.datasets.mnist.load_data()\n",
     "N = 1000\n",
     "small_x_train = x_train[:N, ...].astype(np.float64) / 256.\n",
     "small_y_train = y_train[:N]"
@@ -345,7 +345,7 @@
     "    unconstrained_observation_noise,\n",
     "    latent_index_points]\n",
     "\n",
-    "optimizer = tf.keras.optimizers.Adam(learning_rate=1.0)\n",
+    "optimizer = tf_keras.optimizers.Adam(learning_rate=1.0)\n",
     "\n",
     "@tf.function(autograph=False, jit_compile=True)\n",
     "def train_model():\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb
index af1b67a7ec..1ffe1b5dd5 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb
@@ -280,12 +280,12 @@
     "\n",
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
-    "import tensorflow.compat.v2 as tf\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_probability as tfp\n",
     "tfb = tfp.bijectors\n",
     "tfd = tfp.distributions\n",
     "tfk = tfp.math.psd_kernels\n",
-    "tf.enable_v2_behavior()\n",
     "\n",
     "from mpl_toolkits.mplot3d import Axes3D\n",
     "%pylab inline\n",
@@ -541,7 +541,7 @@
    "source": [
     "# Now we optimize the model parameters.\n",
     "num_iters = 1000\n",
-    "optimizer = tf.keras.optimizers.Adam(learning_rate=.01)\n",
+    "optimizer = tf_keras.optimizers.Adam(learning_rate=.01)\n",
     "\n",
     "# Use `tf.function` to trace the loss for more efficient evaluation.\n",
     "@tf.function(autograph=False, jit_compile=False)\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb
index 874d6fcb97..5bad248a45 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Model_Variational_Inference.ipynb
@@ -292,8 +292,8 @@
     "import seaborn as sns; sns.set_context('notebook')\n",
     "import tensorflow_datasets as tfds\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "\n",
     "import tensorflow_probability as tfp\n",
     "tfd = tfp.distributions\n",
@@ -800,7 +800,7 @@
    },
    "outputs": [],
    "source": [
-    "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)\n",
+    "optimizer = tf_keras.optimizers.Adam(learning_rate=1e-2)\n",
     "\n",
     "losses = tfp.vi.fit_surrogate_posterior(\n",
     "    target_log_prob_fn, \n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb
index d9fb7b6b5e..d7986ea16a 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb
@@ -95,6 +95,7 @@
     "import requests\n",
     "\n",
     "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_probability as tfp\n",
     "\n",
     "tfd = tfp.distributions\n",
@@ -743,7 +744,7 @@
     "      previous_kernel_results=kernel_results)\n",
     "  return next_state, next_kernel_results\n",
     "\n",
-    "optimizer = tf.keras.optimizers.Adam(learning_rate=.01)\n",
+    "optimizer = tf_keras.optimizers.Adam(learning_rate=.01)\n",
     "\n",
     "# Set up M-step (gradient descent).\n",
     "@tf.function(autograph=False, jit_compile=True)\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Modeling_with_JointDistribution.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Modeling_with_JointDistribution.ipynb
index 4689a05bc4..846a8a2a23 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Modeling_with_JointDistribution.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Modeling_with_JointDistribution.ipynb
@@ -114,9 +114,8 @@
     "import pandas as pd\n",
     "import arviz as az\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
-    "\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_probability as tfp\n",
     "\n",
     "sns.reset_defaults()\n",
@@ -2885,7 +2884,7 @@
    },
    "outputs": [],
    "source": [
-    "opt = tf.optimizers.Adam(learning_rate=.1)\n",
+    "opt = tf_keras.optimizers.Adam(learning_rate=.1)\n",
     "\n",
     "@tf.function(experimental_compile=True)\n",
     "def run_approximation():\n",
@@ -3232,13 +3231,13 @@
    },
    "outputs": [],
    "source": [
-    "learning_rate = tf.optimizers.schedules.ExponentialDecay(\n",
+    "learning_rate = tf_keras.optimizers.schedules.ExponentialDecay(\n",
     "    initial_learning_rate=1e-2,\n",
     "    decay_steps=10,\n",
     "    decay_rate=0.99,\n",
     "    staircase=True)\n",
     "\n",
-    "opt = tf.optimizers.Adam(learning_rate=learning_rate)\n",
+    "opt = tf_keras.optimizers.Adam(learning_rate=learning_rate)\n",
     "\n",
     "@tf.function(experimental_compile=True)\n",
     "def run_approximation():\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Multilevel_Modeling_Primer.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Multilevel_Modeling_Primer.ipynb
index a329d194fe..87298b5356 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Multilevel_Modeling_Primer.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Multilevel_Modeling_Primer.ipynb
@@ -91,14 +91,13 @@
     "import seaborn as sns\n",
     "import warnings\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
-    "\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_datasets as tfds\n",
     "import tensorflow_probability as tfp\n",
     "\n",
-    "tfk = tf.keras\n",
-    "tfkl = tf.keras.layers\n",
+    "tfk = tf_keras\n",
+    "tfkl = tf_keras.layers\n",
     "tfpl = tfp.layers\n",
     "tfd = tfp.distributions\n",
     "tfb = tfp.bijectors\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb
index e41f6fe90a..9e16330ecb 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Multiple_changepoint_detection_and_Bayesian_model_selection.ipynb
@@ -82,8 +82,8 @@
    "outputs": [],
    "source": [
     "import numpy as np\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "import tensorflow_probability as tfp\n",
     "from tensorflow_probability import distributions as tfd\n",
     "\n",
@@ -317,7 +317,7 @@
     "\n",
     "losses = tfp.math.minimize(\n",
     "    lambda: -log_prob(),\n",
-    "    optimizer=tf.keras.optimizers.Adam(learning_rate=0.1),\n",
+    "    optimizer=tf_keras.optimizers.Adam(learning_rate=0.1),\n",
     "    num_steps=100)\n",
     "plt.plot(losses)\n",
     "plt.ylabel('Negative log marginal likelihood')"
@@ -740,7 +740,7 @@
    "source": [
     "losses = tfp.math.minimize(\n",
     "    lambda: -log_prob(),\n",
-    "    optimizer=tf.keras.optimizers.Adam(0.1),\n",
+    "    optimizer=tf_keras.optimizers.Adam(0.1),\n",
     "    num_steps=100)\n",
     "plt.plot(losses)\n",
     "plt.ylabel('Negative log marginal likelihood')"
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb
index c5c8081c25..bab8b1ea41 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Optimizers_in_TensorFlow_Probability.ipynb
@@ -109,9 +109,9 @@
     "from six.moves import urllib\n",
     "from sklearn import preprocessing\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
-    "\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
+    "\n",
     "import tensorflow_probability as tfp"
    ]
   },
@@ -457,7 +457,7 @@
     "  pred = tf.matmul(feat, tf.expand_dims(beta, axis=-1)) + intercept\n",
     "  mse_loss = tf.reduce_sum(\n",
     "      tf.cast(\n",
-    "        tf.losses.mean_squared_error(y_true=lab, y_pred=pred), tf.float64))\n",
+    "        tf_keras.losses.mean_squared_error(y_true=lab, y_pred=pred), tf.float64))\n",
     "  l1_penalty = regularization * tf.reduce_sum(tf.abs(beta))\n",
     "  total_loss = mse_loss + l1_penalty\n",
     "  return total_loss"
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb
index f90231691d..976046b216 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb
@@ -99,8 +99,8 @@
     "import numpy as np\n",
     "import seaborn as sns\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
+    "import tensorflow as tf\n",
+    "import tf_keras\n",
     "\n",
     "import tensorflow_probability as tfp\n",
     "\n",
@@ -283,13 +283,13 @@
    ],
    "source": [
     "# Build model.\n",
-    "model = tf.keras.Sequential([\n",
-    "  tf.keras.layers.Dense(1),\n",
+    "model = tf_keras.Sequential([\n",
+    "  tf_keras.layers.Dense(1),\n",
     "  tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),\n",
     "])\n",
     "\n",
     "# Do inference.\n",
-    "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+    "model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
     "model.fit(x, y, epochs=1000, verbose=False);\n",
     "\n",
     "# Profit.\n",
@@ -383,15 +383,15 @@
    ],
    "source": [
     "# Build model.\n",
-    "model = tf.keras.Sequential([\n",
-    "  tf.keras.layers.Dense(1 + 1),\n",
+    "model = tf_keras.Sequential([\n",
+    "  tf_keras.layers.Dense(1 + 1),\n",
     "  tfp.layers.DistributionLambda(\n",
     "      lambda t: tfd.Normal(loc=t[..., :1],\n",
     "                           scale=1e-3 + tf.math.softplus(0.05 * t[...,1:]))),\n",
     "])\n",
     "\n",
     "# Do inference.\n",
-    "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+    "model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
     "model.fit(x, y, epochs=1000, verbose=False);\n",
     "\n",
     "# Profit.\n",
@@ -480,7 +480,7 @@
     "def posterior_mean_field(kernel_size, bias_size=0, dtype=None):\n",
     "  n = kernel_size + bias_size\n",
     "  c = np.log(np.expm1(1.))\n",
-    "  return tf.keras.Sequential([\n",
+    "  return tf_keras.Sequential([\n",
     "      tfp.layers.VariableLayer(2 * n, dtype=dtype),\n",
     "      tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n",
     "          tfd.Normal(loc=t[..., :n],\n",
@@ -503,7 +503,7 @@
     "# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.\n",
     "def prior_trainable(kernel_size, bias_size=0, dtype=None):\n",
     "  n = kernel_size + bias_size\n",
-    "  return tf.keras.Sequential([\n",
+    "  return tf_keras.Sequential([\n",
     "      tfp.layers.VariableLayer(n, dtype=dtype),\n",
     "      tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n",
     "          tfd.Normal(loc=t, scale=1),\n",
@@ -534,13 +534,13 @@
    ],
    "source": [
     "# Build model.\n",
-    "model = tf.keras.Sequential([\n",
+    "model = tf_keras.Sequential([\n",
     "  tfp.layers.DenseVariational(1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),\n",
     "  tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),\n",
     "])\n",
     "\n",
     "# Do inference.\n",
-    "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+    "model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
     "model.fit(x, y, epochs=1000, verbose=False);\n",
     "\n",
     "# Profit.\n",
@@ -642,7 +642,7 @@
    ],
    "source": [
     "# Build model.\n",
-    "model = tf.keras.Sequential([\n",
+    "model = tf_keras.Sequential([\n",
     "  tfp.layers.DenseVariational(1 + 1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),\n",
     "  tfp.layers.DistributionLambda(\n",
     "      lambda t: tfd.Normal(loc=t[..., :1],\n",
@@ -650,7 +650,7 @@
     "])\n",
     "\n",
     "# Do inference.\n",
-    "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+    "model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
     "model.fit(x, y, epochs=1000, verbose=False);\n",
     "\n",
     "# Profit.\n",
@@ -741,7 +741,7 @@
    "outputs": [],
    "source": [
     "#@title Custom PSD Kernel\n",
-    "class RBFKernelFn(tf.keras.layers.Layer):\n",
+    "class RBFKernelFn(tf_keras.layers.Layer):\n",
     "  def __init__(self, **kwargs):\n",
     "    super(RBFKernelFn, self).__init__(**kwargs)\n",
     "    dtype = kwargs.get('dtype', None)\n",
@@ -783,13 +783,13 @@
    "outputs": [],
    "source": [
     "# For numeric stability, set the default floating-point dtype to float64\n",
-    "tf.keras.backend.set_floatx('float64')\n",
+    "tf_keras.backend.set_floatx('float64')\n",
     "\n",
     "# Build model.\n",
     "num_inducing_points = 40\n",
-    "model = tf.keras.Sequential([\n",
-    "  tf.keras.layers.InputLayer(input_shape=[1]),\n",
-    "  tf.keras.layers.Dense(1, kernel_initializer='ones', use_bias=False),\n",
+    "model = tf_keras.Sequential([\n",
+    "  tf_keras.layers.InputLayer(input_shape=[1]),\n",
+    "  tf_keras.layers.Dense(1, kernel_initializer='ones', use_bias=False),\n",
     "  tfp.layers.VariationalGaussianProcess(\n",
     "      num_inducing_points=num_inducing_points,\n",
     "      kernel_provider=RBFKernelFn(),\n",
@@ -806,7 +806,7 @@
     "batch_size = 32\n",
     "loss = lambda y, rv_y: rv_y.variational_loss(\n",
     "    y, kl_weight=np.array(batch_size, x.dtype) / x.shape[0])\n",
-    "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=loss)\n",
+    "model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=loss)\n",
     "model.fit(x, y, batch_size=batch_size, epochs=1000, verbose=False)\n",
     "\n",
     "# Profit.\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb
index 71cd8347ed..94c21c5bee 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_VAE.ipynb
@@ -96,15 +96,13 @@
     "\n",
     "import numpy as np\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
-    "tf.enable_v2_behavior()\n",
-    "\n",
+    "import tensorflow as tf\n",
+    "import tf_keras as tfk\n",
     "import tensorflow_datasets as tfds\n",
     "import tensorflow_probability as tfp\n",
     "\n",
     "\n",
-    "tfk = tf.keras\n",
-    "tfkl = tf.keras.layers\n",
+    "tfkl = tfk.layers\n",
     "tfpl = tfp.layers\n",
     "tfd = tfp.distributions"
    ]
@@ -434,7 +432,7 @@
    "source": [
     "negloglik = lambda x, rv_x: -rv_x.log_prob(x)\n",
     "\n",
-    "vae.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),\n",
+    "vae.compile(optimizer=tfk.optimizers.Adam(learning_rate=1e-3),\n",
     "            loss=negloglik)\n",
     "\n",
     "_ = vae.fit(train_dataset,\n",
diff --git a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_PCA.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_PCA.ipynb
index 0de23fb122..fff443d5ff 100644
--- a/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_PCA.ipynb
+++ b/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_PCA.ipynb
@@ -102,14 +102,13 @@
     "import numpy as np\n",
     "import seaborn as sns\n",
     "\n",
-    "import tensorflow.compat.v2 as tf\n",
+    "import tensorflow as tf\n",
tf_keras\n", "import tensorflow_probability as tfp\n", "\n", "from tensorflow_probability import bijectors as tfb\n", "from tensorflow_probability import distributions as tfd\n", "\n", - "tf.enable_v2_behavior()\n", - "\n", "plt.style.use(\"ggplot\")\n", "warnings.filterwarnings('ignore')" ] @@ -337,7 +336,7 @@ "target_log_prob_fn = lambda w, z: model.log_prob((w, z, x_train))\n", "losses = tfp.math.minimize(\n", " lambda: -target_log_prob_fn(w, z),\n", - " optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n", + " optimizer=tf_keras.optimizers.Adam(learning_rate=0.05),\n", " num_steps=200)" ] }, @@ -479,7 +478,7 @@ "losses = tfp.vi.fit_surrogate_posterior(\n", " target_log_prob_fn,\n", " surrogate_posterior=surrogate_posterior,\n", - " optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n", + " optimizer=tf_keras.optimizers.Adam(learning_rate=0.05),\n", " num_steps=200)" ] }, diff --git a/tensorflow_probability/examples/jupyter_notebooks/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb b/tensorflow_probability/examples/jupyter_notebooks/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb index 6c86b1969b..e10324cd99 100644 --- a/tensorflow_probability/examples/jupyter_notebooks/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb +++ b/tensorflow_probability/examples/jupyter_notebooks/STS_approximate_inference_for_models_with_non_Gaussian_observations.ipynb @@ -77,13 +77,12 @@ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", - "import tensorflow.compat.v2 as tf\n", + "import tensorflow as tf\n", + "import tf_keras\n", "import tensorflow_probability as tfp\n", "\n", "from tensorflow_probability import bijectors as tfb\n", - "from tensorflow_probability import distributions as tfd\n", - "\n", - "tf.enable_v2_behavior()" + "from tensorflow_probability import distributions as tfd\n" ] }, { @@ -660,7 +659,7 @@ "t0 = time.time()\n", "losses = tfp.vi.fit_surrogate_posterior(pinned_model.unnormalized_log_prob,\n", " surrogate_posterior,\n", - " optimizer=tf.keras.optimizers.Adam(0.1),\n", + " optimizer=tf_keras.optimizers.Adam(0.1),\n", " num_steps=num_variational_steps)\n", "t1 = time.time()\n", "print(\"Inference ran in {:.2f}s.\".format(t1-t0))" diff --git a/tensorflow_probability/examples/jupyter_notebooks/Variational_Inference_and_Joint_Distributions.ipynb b/tensorflow_probability/examples/jupyter_notebooks/Variational_Inference_and_Joint_Distributions.ipynb index 604d7c8663..170eb68a44 100644 --- a/tensorflow_probability/examples/jupyter_notebooks/Variational_Inference_and_Joint_Distributions.ipynb +++ b/tensorflow_probability/examples/jupyter_notebooks/Variational_Inference_and_Joint_Distributions.ipynb @@ -167,6 +167,7 @@ "import seaborn as sns\n", "import tensorflow as tf\n", "import tensorflow_datasets as tfds\n", + "import tf_keras\n", "import tensorflow_probability as tfp\n", "import warnings\n", "\n", @@ -512,7 +513,7 @@ } ], "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)\n", + "optimizer = tf_keras.optimizers.Adam(learning_rate=1e-2)\n", "mvn_loss = tfp.vi.fit_surrogate_posterior(\n", " target_model.unnormalized_log_prob,\n", " surrogate_posterior,\n", @@ -706,7 +707,7 @@ } ], "source": [ - "optimizer=tf.keras.optimizers.Adam(learning_rate=1e-2)\n", + "optimizer=tf_keras.optimizers.Adam(learning_rate=1e-2)\n", "iaf_loss = tfp.vi.fit_surrogate_posterior(\n", " target_model.unnormalized_log_prob,\n", " iaf_surrogate_posterior,\n", @@ -830,7 +831,7 @@ " 
     "        mean_field_scale  # apply the block matrix transformation to the standard Normal distribution\n",
     "    ]))\n",
     "\n",
-    "optimizer=tf.keras.optimizers.Adam(learning_rate=1e-2)\n",
+    "optimizer=tf_keras.optimizers.Adam(learning_rate=1e-2)\n",
     "mean_field_loss = tfp.vi.fit_surrogate_posterior(\n",
     "    target_model.unnormalized_log_prob,\n",
     "    mean_field_surrogate_posterior,\n",