diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2d477c6387fd..d1641e665063 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -243,7 +243,6 @@ def __init__(self, version: str):
     "ray-overview/examples/llamafactory-llm-fine-tune/README.ipynb",
     "ray-overview/examples/llamafactory-llm-fine-tune/**/*.ipynb",
     "serve/tutorials/asynchronous-inference/content/asynchronous-inference.ipynb",
-    "serve/tutorials/asynchronous-inference/content/README.md",
     # Legacy/backward compatibility
     "ray-overview/examples/**/README.md",
     "train/examples/**/README.md",
diff --git a/doc/source/serve/tutorials/asynchronous-inference/content/README.md b/doc/source/serve/tutorials/asynchronous-inference/content/README.md
index 1d3fad1e1082..fb85c632099b 100644
--- a/doc/source/serve/tutorials/asynchronous-inference/content/README.md
+++ b/doc/source/serve/tutorials/asynchronous-inference/content/README.md
@@ -1,3 +1,6 @@
+---
+orphan: true
+---
 # Asynchronous Inference with Ray Serve
 
 **⏱️ Time to complete:** 30 minutes
@@ -54,14 +57,14 @@ Redis serves as both the message broker (task queue) and result backend.
 
 **Install and start Redis (Google Colab compatible):**
 
-```python
+```bash
 # Install and start Redis server
-!sudo apt-get update -qq
-!sudo apt-get install -y redis-server
-!redis-server --port 6399 --save "" --appendonly no --daemonize yes
+sudo apt-get update -qq
+sudo apt-get install -y redis-server
+redis-server --port 6399 --save "" --appendonly no --daemonize yes
 
 # Verify Redis is running
-!redis-cli -p 6399 ping
+redis-cli -p 6399 ping
 ```
 
 **Alternative methods:**
@@ -74,7 +77,7 @@ Redis serves as both the message broker (task queue) and result backend.
 
 
 ```python
-!pip install -q ray[serve-async-inference]>=2.50.0 requests>=2.31.0 PyPDF2>=3.0.0 celery[redis]
+pip install -q ray[serve-async-inference]>=2.50.0 requests>=2.31.0 PyPDF2>=3.0.0 celery[redis]
 ```
 
 ## Step 3: Start the Ray Serve Application