Update tf.keras -> tf_keras in example notebooks.
Starting in TF 2.16 (and already in tf-nightly), tf.keras will refer to Keras 3 instead of Keras 2.

TFP is only compatible with Keras 2, which can be imported as `tf_keras`.

This change also modernizes the TensorFlow imports in these notebooks: `import tensorflow as tf` replaces `import tensorflow.compat.v2 as tf`, and the `tf.enable_v2_behavior()` calls are removed.

PiperOrigin-RevId: 609224497
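
For illustration, the recurring pattern in these diffs is sketched below. This is a minimal sketch, not part of the commit itself; it assumes the standalone `tf-keras` package from PyPI (which provides the `tf_keras` module) is installed alongside TensorFlow and TFP, and the Adam optimizer line is just a representative call site from the notebooks.

# Before: Keras 2 reached through the tf.keras alias; under TF 2.16+
# tf.keras resolves to Keras 3, which TFP does not support.
#   import tensorflow.compat.v2 as tf
#   tf.enable_v2_behavior()
#   optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

# After: a plain TensorFlow import, with Keras 2 imported explicitly.
import tensorflow as tf
import tf_keras
import tensorflow_probability as tfp

optimizer = tf_keras.optimizers.Adam(learning_rate=0.01)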
jburnim authored and tensorflower-gardener committed Feb 22, 2024
1 parent 0c26a85 commit e3a03b4
Showing 13 changed files with 65 additions and 69 deletions.
File 1 of 13
@@ -200,8 +200,8 @@
 ],
 "source": [
 "import numpy as np\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "tfd = tfp.distributions\n",
 "tfk = tfp.math.psd_kernels\n",
@@ -242,7 +242,7 @@
 ],
 "source": [
 "# Load the MNIST data set and isolate a subset of it.\n",
-"(x_train, y_train), (_, _) = tf.keras.datasets.mnist.load_data()\n",
+"(x_train, y_train), (_, _) = tf_keras.datasets.mnist.load_data()\n",
 "N = 1000\n",
 "small_x_train = x_train[:N, ...].astype(np.float64) / 256.\n",
 "small_y_train = y_train[:N]"
@@ -345,7 +345,7 @@
 " unconstrained_observation_noise,\n",
 " latent_index_points]\n",
 "\n",
-"optimizer = tf.keras.optimizers.Adam(learning_rate=1.0)\n",
+"optimizer = tf_keras.optimizers.Adam(learning_rate=1.0)\n",
 "\n",
 "@tf.function(autograph=False, jit_compile=True)\n",
 "def train_model():\n",
File 2 of 13
@@ -280,12 +280,12 @@
 "\n",
 "import numpy as np\n",
 "import matplotlib.pyplot as plt\n",
-"import tensorflow.compat.v2 as tf\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "tfb = tfp.bijectors\n",
 "tfd = tfp.distributions\n",
 "tfk = tfp.math.psd_kernels\n",
-"tf.enable_v2_behavior()\n",
 "\n",
 "from mpl_toolkits.mplot3d import Axes3D\n",
 "%pylab inline\n",
@@ -541,7 +541,7 @@
 "source": [
 "# Now we optimize the model parameters.\n",
 "num_iters = 1000\n",
-"optimizer = tf.keras.optimizers.Adam(learning_rate=.01)\n",
+"optimizer = tf_keras.optimizers.Adam(learning_rate=.01)\n",
 "\n",
 "# Use `tf.function` to trace the loss for more efficient evaluation.\n",
 "@tf.function(autograph=False, jit_compile=False)\n",
File 3 of 13
@@ -292,8 +292,8 @@
 "import seaborn as sns; sns.set_context('notebook')\n",
 "import tensorflow_datasets as tfds\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "\n",
 "import tensorflow_probability as tfp\n",
 "tfd = tfp.distributions\n",
@@ -800,7 +800,7 @@
 },
 "outputs": [],
 "source": [
-"optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)\n",
+"optimizer = tf_keras.optimizers.Adam(learning_rate=1e-2)\n",
 "\n",
 "losses = tfp.vi.fit_surrogate_posterior(\n",
 " target_log_prob_fn, \n",
File 4 of 13
@@ -95,6 +95,7 @@
 "import requests\n",
 "\n",
 "import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "\n",
 "tfd = tfp.distributions\n",
@@ -743,7 +744,7 @@
 " previous_kernel_results=kernel_results)\n",
 " return next_state, next_kernel_results\n",
 "\n",
-"optimizer = tf.keras.optimizers.Adam(learning_rate=.01)\n",
+"optimizer = tf_keras.optimizers.Adam(learning_rate=.01)\n",
 "\n",
 "# Set up M-step (gradient descent).\n",
 "@tf.function(autograph=False, jit_compile=True)\n",
File 5 of 13
@@ -114,9 +114,8 @@
 "import pandas as pd\n",
 "import arviz as az\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
-"\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "\n",
 "sns.reset_defaults()\n",
@@ -2885,7 +2884,7 @@
 },
 "outputs": [],
 "source": [
-"opt = tf.optimizers.Adam(learning_rate=.1)\n",
+"opt = tf_keras.optimizers.Adam(learning_rate=.1)\n",
 "\n",
 "@tf.function(experimental_compile=True)\n",
 "def run_approximation():\n",
@@ -3232,13 +3231,13 @@
 },
 "outputs": [],
 "source": [
-"learning_rate = tf.optimizers.schedules.ExponentialDecay(\n",
+"learning_rate = tf_keras.optimizers.schedules.ExponentialDecay(\n",
 " initial_learning_rate=1e-2,\n",
 " decay_steps=10,\n",
 " decay_rate=0.99,\n",
 " staircase=True)\n",
 "\n",
-"opt = tf.optimizers.Adam(learning_rate=learning_rate)\n",
+"opt = tf_keras.optimizers.Adam(learning_rate=learning_rate)\n",
 "\n",
 "@tf.function(experimental_compile=True)\n",
 "def run_approximation():\n",
File 6 of 13
@@ -91,14 +91,13 @@
 "import seaborn as sns\n",
 "import warnings\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
-"\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_datasets as tfds\n",
 "import tensorflow_probability as tfp\n",
 "\n",
-"tfk = tf.keras\n",
-"tfkl = tf.keras.layers\n",
+"tfk = tf_keras\n",
+"tfkl = tf_keras.layers\n",
 "tfpl = tfp.layers\n",
 "tfd = tfp.distributions\n",
 "tfb = tfp.bijectors\n",
File 7 of 13
@@ -82,8 +82,8 @@
 "outputs": [],
 "source": [
 "import numpy as np\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "from tensorflow_probability import distributions as tfd\n",
 "\n",
@@ -317,7 +317,7 @@
 "\n",
 "losses = tfp.math.minimize(\n",
 " lambda: -log_prob(),\n",
-" optimizer=tf.keras.optimizers.Adam(learning_rate=0.1),\n",
+" optimizer=tf_keras.optimizers.Adam(learning_rate=0.1),\n",
 " num_steps=100)\n",
 "plt.plot(losses)\n",
 "plt.ylabel('Negative log marginal likelihood')"
@@ -740,7 +740,7 @@
 "source": [
 "losses = tfp.math.minimize(\n",
 " lambda: -log_prob(),\n",
-" optimizer=tf.keras.optimizers.Adam(0.1),\n",
+" optimizer=tf_keras.optimizers.Adam(0.1),\n",
 " num_steps=100)\n",
 "plt.plot(losses)\n",
 "plt.ylabel('Negative log marginal likelihood')"
File 8 of 13
@@ -109,9 +109,9 @@
 "from six.moves import urllib\n",
 "from sklearn import preprocessing\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
-"\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
+"\n",
 "import tensorflow_probability as tfp"
 ]
 },
@@ -457,7 +457,7 @@
 " pred = tf.matmul(feat, tf.expand_dims(beta, axis=-1)) + intercept\n",
 " mse_loss = tf.reduce_sum(\n",
 " tf.cast(\n",
-" tf.losses.mean_squared_error(y_true=lab, y_pred=pred), tf.float64))\n",
+" tf_keras.losses.mean_squared_error(y_true=lab, y_pred=pred), tf.float64))\n",
 " l1_penalty = regularization * tf.reduce_sum(tf.abs(beta))\n",
 " total_loss = mse_loss + l1_penalty\n",
 " return total_loss"
File 9 of 13
@@ -99,8 +99,8 @@
 "import numpy as np\n",
 "import seaborn as sns\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "\n",
 "import tensorflow_probability as tfp\n",
 "\n",
@@ -283,13 +283,13 @@
 ],
 "source": [
 "# Build model.\n",
-"model = tf.keras.Sequential([\n",
-" tf.keras.layers.Dense(1),\n",
+"model = tf_keras.Sequential([\n",
+" tf_keras.layers.Dense(1),\n",
 " tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),\n",
 "])\n",
 "\n",
 "# Do inference.\n",
-"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+"model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
 "model.fit(x, y, epochs=1000, verbose=False);\n",
 "\n",
 "# Profit.\n",
@@ -383,15 +383,15 @@
 ],
 "source": [
 "# Build model.\n",
-"model = tf.keras.Sequential([\n",
-" tf.keras.layers.Dense(1 + 1),\n",
+"model = tf_keras.Sequential([\n",
+" tf_keras.layers.Dense(1 + 1),\n",
 " tfp.layers.DistributionLambda(\n",
 " lambda t: tfd.Normal(loc=t[..., :1],\n",
 " scale=1e-3 + tf.math.softplus(0.05 * t[...,1:]))),\n",
 "])\n",
 "\n",
 "# Do inference.\n",
-"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+"model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
 "model.fit(x, y, epochs=1000, verbose=False);\n",
 "\n",
 "# Profit.\n",
@@ -480,7 +480,7 @@
 "def posterior_mean_field(kernel_size, bias_size=0, dtype=None):\n",
 " n = kernel_size + bias_size\n",
 " c = np.log(np.expm1(1.))\n",
-" return tf.keras.Sequential([\n",
+" return tf_keras.Sequential([\n",
 " tfp.layers.VariableLayer(2 * n, dtype=dtype),\n",
 " tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n",
 " tfd.Normal(loc=t[..., :n],\n",
@@ -503,7 +503,7 @@
 "# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.\n",
 "def prior_trainable(kernel_size, bias_size=0, dtype=None):\n",
 " n = kernel_size + bias_size\n",
-" return tf.keras.Sequential([\n",
+" return tf_keras.Sequential([\n",
 " tfp.layers.VariableLayer(n, dtype=dtype),\n",
 " tfp.layers.DistributionLambda(lambda t: tfd.Independent(\n",
 " tfd.Normal(loc=t, scale=1),\n",
@@ -534,13 +534,13 @@
 ],
 "source": [
 "# Build model.\n",
-"model = tf.keras.Sequential([\n",
+"model = tf_keras.Sequential([\n",
 " tfp.layers.DenseVariational(1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),\n",
 " tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),\n",
 "])\n",
 "\n",
 "# Do inference.\n",
-"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+"model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
 "model.fit(x, y, epochs=1000, verbose=False);\n",
 "\n",
 "# Profit.\n",
@@ -642,15 +642,15 @@
 ],
 "source": [
 "# Build model.\n",
-"model = tf.keras.Sequential([\n",
+"model = tf_keras.Sequential([\n",
 " tfp.layers.DenseVariational(1 + 1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]),\n",
 " tfp.layers.DistributionLambda(\n",
 " lambda t: tfd.Normal(loc=t[..., :1],\n",
 " scale=1e-3 + tf.math.softplus(0.01 * t[...,1:]))),\n",
 "])\n",
 "\n",
 "# Do inference.\n",
-"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
+"model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=negloglik)\n",
 "model.fit(x, y, epochs=1000, verbose=False);\n",
 "\n",
 "# Profit.\n",
@@ -741,7 +741,7 @@
 "outputs": [],
 "source": [
 "#@title Custom PSD Kernel\n",
-"class RBFKernelFn(tf.keras.layers.Layer):\n",
+"class RBFKernelFn(tf_keras.layers.Layer):\n",
 " def __init__(self, **kwargs):\n",
 " super(RBFKernelFn, self).__init__(**kwargs)\n",
 " dtype = kwargs.get('dtype', None)\n",
@@ -783,13 +783,13 @@
 "outputs": [],
 "source": [
 "# For numeric stability, set the default floating-point dtype to float64\n",
-"tf.keras.backend.set_floatx('float64')\n",
+"tf_keras.backend.set_floatx('float64')\n",
 "\n",
 "# Build model.\n",
 "num_inducing_points = 40\n",
-"model = tf.keras.Sequential([\n",
-" tf.keras.layers.InputLayer(input_shape=[1]),\n",
-" tf.keras.layers.Dense(1, kernel_initializer='ones', use_bias=False),\n",
+"model = tf_keras.Sequential([\n",
+" tf_keras.layers.InputLayer(input_shape=[1]),\n",
+" tf_keras.layers.Dense(1, kernel_initializer='ones', use_bias=False),\n",
 " tfp.layers.VariationalGaussianProcess(\n",
 " num_inducing_points=num_inducing_points,\n",
 " kernel_provider=RBFKernelFn(),\n",
@@ -806,7 +806,7 @@
 "batch_size = 32\n",
 "loss = lambda y, rv_y: rv_y.variational_loss(\n",
 " y, kl_weight=np.array(batch_size, x.dtype) / x.shape[0])\n",
-"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss=loss)\n",
+"model.compile(optimizer=tf_keras.optimizers.Adam(learning_rate=0.01), loss=loss)\n",
 "model.fit(x, y, batch_size=batch_size, epochs=1000, verbose=False)\n",
 "\n",
 "# Profit.\n",
File 10 of 13
@@ -96,15 +96,13 @@
 "\n",
 "import numpy as np\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
-"tf.enable_v2_behavior()\n",
-"\n",
+"import tensorflow as tf\n",
+"import tf_keras as tfk\n",
 "import tensorflow_datasets as tfds\n",
 "import tensorflow_probability as tfp\n",
 "\n",
 "\n",
-"tfk = tf.keras\n",
-"tfkl = tf.keras.layers\n",
+"tfkl = tfk.layers\n",
 "tfpl = tfp.layers\n",
 "tfd = tfp.distributions"
 ]
@@ -434,7 +432,7 @@
 "source": [
 "negloglik = lambda x, rv_x: -rv_x.log_prob(x)\n",
 "\n",
-"vae.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),\n",
+"vae.compile(optimizer=tfk.optimizers.Adam(learning_rate=1e-3),\n",
 " loss=negloglik)\n",
 "\n",
 "_ = vae.fit(train_dataset,\n",
File 11 of 13
@@ -102,14 +102,13 @@
 "import numpy as np\n",
 "import seaborn as sns\n",
 "\n",
-"import tensorflow.compat.v2 as tf\n",
+"import tensorflow as tf\n",
+"import tf_keras\n",
 "import tensorflow_probability as tfp\n",
 "\n",
 "from tensorflow_probability import bijectors as tfb\n",
 "from tensorflow_probability import distributions as tfd\n",
 "\n",
-"tf.enable_v2_behavior()\n",
-"\n",
 "plt.style.use(\"ggplot\")\n",
 "warnings.filterwarnings('ignore')"
 ]
@@ -337,7 +336,7 @@
 "target_log_prob_fn = lambda w, z: model.log_prob((w, z, x_train))\n",
 "losses = tfp.math.minimize(\n",
 " lambda: -target_log_prob_fn(w, z),\n",
-" optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n",
+" optimizer=tf_keras.optimizers.Adam(learning_rate=0.05),\n",
 " num_steps=200)"
 ]
 },
@@ -479,7 +478,7 @@
 "losses = tfp.vi.fit_surrogate_posterior(\n",
 " target_log_prob_fn,\n",
 " surrogate_posterior=surrogate_posterior,\n",
-" optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n",
+" optimizer=tf_keras.optimizers.Adam(learning_rate=0.05),\n",
 " num_steps=200)"
 ]
 },
Files 12 and 13 of 13 not shown.
