diff --git a/conda/environments/deployment_docs.yml b/conda/environments/deployment_docs.yml
index 46029fcd..183340b5 100644
--- a/conda/environments/deployment_docs.yml
+++ b/conda/environments/deployment_docs.yml
@@ -10,7 +10,8 @@ dependencies:
   - pydata-sphinx-theme>=0.15.4
   - python=3.12
   - pre-commit>=3.8.0
-  - sphinx>=8.0.2
+  # Upper bound pin on sphinx can be removed once https://github.com/mgaitan/sphinxcontrib-mermaid/issues/160 is resolved
+  - sphinx>=8.0.2,<8.1
   - sphinx-autobuild>=2024.9.19
   - sphinx-copybutton>=0.5.2
   - sphinx-design>=0.6.1
diff --git a/source/cloud/aws/eks.md b/source/cloud/aws/eks.md
index 66648161..4a28cea4 100644
--- a/source/cloud/aws/eks.md
+++ b/source/cloud/aws/eks.md
@@ -24,7 +24,7 @@ Now we can launch a GPU enabled EKS cluster. First launch an EKS cluster with `e

 ```console
 $ eksctl create cluster rapids \
-    --version 1.24 \
+    --version 1.29 \
     --nodes 3 \
     --node-type=p3.8xlarge \
     --timeout=40m \
@@ -32,8 +32,7 @@ $ eksctl create cluster rapids \
     --ssh-public-key \ # Be sure to set your public key ID here
     --region us-east-1 \
     --zones=us-east-1c,us-east-1b,us-east-1d \
-    --auto-kubeconfig \
-    --install-nvidia-plugin=false
+    --auto-kubeconfig
 ```

-With this command, you’ve launched an EKS cluster called `rapids`. You’ve specified that it should use nodes of type `p3.8xlarge`. We also specified that we don't want to install the NVIDIA drivers as we will do that with the NVIDIA operator.
+With this command, you’ve launched an EKS cluster called `rapids`. You’ve specified that it should use nodes of type `p3.8xlarge`.
@@ -46,30 +45,21 @@ $ aws eks --region us-east-1 update-kubeconfig --name rapids

 ## Install drivers

-Next, [install the NVIDIA drivers](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html) onto each node.
+As we selected a GPU node type, EKS will automatically install the NVIDIA drivers for us. We can verify this by listing the NVIDIA device plugin Pods.

 ```console
-$ helm install --repo https://helm.ngc.nvidia.com/nvidia --wait --generate-name -n gpu-operator --create-namespace gpu-operator
-NAME: gpu-operator-1670843572
-NAMESPACE: gpu-operator
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
+$ kubectl get po -n kube-system -l name=nvidia-device-plugin-ds
+NAME                                   READY   STATUS    RESTARTS   AGE
+nvidia-device-plugin-daemonset-kv7t5   1/1     Running   0          52m
+nvidia-device-plugin-daemonset-rhmvx   1/1     Running   0          52m
+nvidia-device-plugin-daemonset-thjhc   1/1     Running   0          52m
 ```

-Verify that the NVIDIA drivers are successfully installed.
-
-```console
-$ kubectl get po -A --watch | grep nvidia
-kube-system   nvidia-driver-installer-6zwcn     1/1     Running   0          8m47s
-kube-system   nvidia-driver-installer-8zmmn     1/1     Running   0          8m47s
-kube-system   nvidia-driver-installer-mjkb8     1/1     Running   0          8m47s
-kube-system   nvidia-gpu-device-plugin-5ffkm    1/1     Running   0          13m
-kube-system   nvidia-gpu-device-plugin-d599s    1/1     Running   0          13m
-kube-system   nvidia-gpu-device-plugin-jrgjh    1/1     Running   0          13m
+```{note}
+By default this plugin will install the latest version of the NVIDIA drivers on every Node. If you need more control over your driver installation, we recommend creating your cluster with `eksctl create cluster --install-nvidia-plugin=false ...` and then installing the drivers yourself using the [NVIDIA GPU Operator](https://docs.nvidia.com/datacenter/cloud-native/gpu-operator/getting-started.html).
 ```

-After your drivers are installed, you are ready to test your cluster.
+After you have confirmed your drivers are installed, you are ready to test your cluster.
 ```{include} ../../_includes/check-gpu-pod-works.md
diff --git a/source/conf.py b/source/conf.py
index 4a8db58d..02ce7ec4 100644
--- a/source/conf.py
+++ b/source/conf.py
@@ -21,25 +21,25 @@
 author = "NVIDIA"

 # Single modifiable version for all of the docs - easier for future updates
-stable_version = "24.08"
-nightly_version = "24.10"
+stable_version = "24.10"
+nightly_version = "24.12"
 versions = {
     "stable": {
         "rapids_version": stable_version,
         "rapids_api_docs_version": "stable",
-        "rapids_container": f"nvcr.io/nvidia/rapidsai/base:{stable_version}-cuda12.5-py3.11",
-        "rapids_notebooks_container": f"nvcr.io/nvidia/rapidsai/notebooks:{stable_version}-cuda12.5-py3.11",
+        "rapids_container": f"nvcr.io/nvidia/rapidsai/base:{stable_version}-cuda12.5-py3.12",
+        "rapids_notebooks_container": f"nvcr.io/nvidia/rapidsai/notebooks:{stable_version}-cuda12.5-py3.12",
         "rapids_conda_channels": "-c rapidsai -c conda-forge -c nvidia",
-        "rapids_conda_packages": f"rapids={stable_version} python=3.11 cuda-version=12.5",
+        "rapids_conda_packages": f"rapids={stable_version} python=3.12 cuda-version=12.5",
     },
     "nightly": {
         "rapids_version": f"{nightly_version}-nightly",
         "rapids_api_docs_version": "nightly",
-        "rapids_container": f"rapidsai/base:{nightly_version + 'a'}-cuda12.5-py3.11",
-        "rapids_notebooks_container": f"rapidsai/notebooks:{nightly_version + 'a'}-cuda12.5-py3.11",
+        "rapids_container": f"rapidsai/base:{nightly_version + 'a'}-cuda12.5-py3.12",
+        "rapids_notebooks_container": f"rapidsai/notebooks:{nightly_version + 'a'}-cuda12.5-py3.12",
         "rapids_conda_channels": "-c rapidsai-nightly -c conda-forge -c nvidia",
-        "rapids_conda_packages": f"rapids={nightly_version} python=3.11 cuda-version=12.5",
+        "rapids_conda_packages": f"rapids={nightly_version} python=3.12 cuda-version=12.5",
     },
 }
 rapids_version = (
diff --git a/source/platforms/coiled.md b/source/platforms/coiled.md
index b7584f02..88a3c954 100644
--- a/source/platforms/coiled.md
+++ b/source/platforms/coiled.md
@@ -14,7 +14,7 @@ To get started you need to install Coiled and login.

 ```console
 $ conda install -c conda-forge coiled
-$ coiled setup
+$ coiled login
 ```

 For more information see the [Coiled Getting Started documentation](https://docs.coiled.io/user_guide/getting_started.html).
@@ -82,7 +82,7 @@ We can also connect a Dask client to see that information for the workers too.
 ```python
 from dask.distributed import Client

-client = Client
+client = Client(cluster)
 client
 ```
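
For context on the final hunk, here is a minimal sketch of how the corrected `client = Client(cluster)` line fits into a Coiled workflow. The cluster name, worker count, and VM type below are illustrative assumptions, not values taken from the docs being patched.

```python
import coiled
from dask.distributed import Client

# Create a small GPU cluster with Coiled (sizing values here are
# illustrative assumptions, not taken from the documentation).
cluster = coiled.Cluster(
    name="rapids-demo",               # hypothetical cluster name
    n_workers=2,
    worker_vm_types=["g4dn.xlarge"],  # assumes an NVIDIA GPU instance type
)

# Connect a Dask client to the cluster, matching the corrected snippet,
# then print the dashboard link to confirm the workers are reachable.
client = Client(cluster)
print(client.dashboard_link)
```

Passing the cluster object to `Client` is what actually connects the client to the Coiled workers; the bare `client = Client` being replaced only referenced the class without ever instantiating it.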