From 5b6db94c7aa40a699782096fa7ebf3d3e93c74f4 Mon Sep 17 00:00:00 2001
From: Archit Kulkarni
Date: Tue, 7 Nov 2023 09:18:03 -0800
Subject: [PATCH] [Doc] Update KubeRay version to 1.0.0 (#40937)

---
 .../examples/gpu-training-example.md          |  6 +++---
 .../examples/mobilenet-rayservice.md          |  2 +-
 .../rayjob-batch-inference-example.md         |  4 ++--
 .../examples/stable-diffusion-rayservice.md   |  2 +-
 .../examples/text-summarizer-rayservice.md    |  2 +-
 .../getting-started/raycluster-quick-start.md |  6 +++---
 .../getting-started/rayjob-quick-start.md     |  2 +-
 .../getting-started/rayservice-quick-start.md |  2 +-
 .../kubernetes/k8s-ecosystem/ingress.md       | 12 +++++------
 .../kubernetes/k8s-ecosystem/kubeflow.md      |  2 +-
 .../k8s-ecosystem/prometheus-grafana.md       |  2 +-
 .../cluster/kubernetes/k8s-ecosystem/pyspy.md |  2 +-
 .../kubernetes/k8s-ecosystem/volcano.md       |  6 +++---
 .../rayservice-troubleshooting.md             |  6 +++---
 .../user-guides/configuring-autoscaling.md    |  6 +++---
 .../kubernetes/user-guides/gke-gcs-bucket.md  |  2 +-
 .../kubernetes/user-guides/helm-chart-rbac.md | 18 ++++++++---------
 .../kubernetes/user-guides/kuberay-gcs-ft.md  | 16 +++++++--------
 .../kubernetes/user-guides/pod-command.md     |  2 +-
 .../user-guides/rayserve-dev-doc.md           |  8 ++++----
 .../kubernetes/user-guides/rayservice.md      |  4 ++--
 .../cluster/kubernetes/user-guides/tls.md     |  8 ++++----
 .../kubernetes/user-guides/upgrade-guide.md   | 20 +++++++++----------
 .../serve/production-guide/kubernetes.md      |  2 +-
 24 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/doc/source/cluster/kubernetes/examples/gpu-training-example.md b/doc/source/cluster/kubernetes/examples/gpu-training-example.md
index 3ae48a85a9239..bf729e4a3f190 100644
--- a/doc/source/cluster/kubernetes/examples/gpu-training-example.md
+++ b/doc/source/cluster/kubernetes/examples/gpu-training-example.md
@@ -40,10 +40,10 @@ kubectl apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/container
# (Method 2) "gcloud container clusters get-credentials <your-cluster-name> --region <your-region> --project <your-project-id>"
# (Method 3) "kubectl config use-context ..."

-# Install both CRDs and KubeRay operator v1.0.0-rc.0.
+# Install both CRDs and KubeRay operator v1.0.0.
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
-helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0
+helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0

# Create a Ray cluster
kubectl apply -f https://raw.githubusercontent.com/ray-project/ray/master/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml
@@ -116,7 +116,7 @@ It is optional.
# Create the KubeRay operator helm repo add kuberay https://ray-project.github.io/kuberay-helm/ helm repo update -helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0 +helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 # Create a Ray cluster kubectl apply -f https://raw.githubusercontent.com/ray-project/ray/master/doc/source/cluster/kubernetes/configs/ray-cluster.gpu.yaml diff --git a/doc/source/cluster/kubernetes/examples/mobilenet-rayservice.md b/doc/source/cluster/kubernetes/examples/mobilenet-rayservice.md index 06f69463a8525..46d522b5b792f 100644 --- a/doc/source/cluster/kubernetes/examples/mobilenet-rayservice.md +++ b/doc/source/cluster/kubernetes/examples/mobilenet-rayservice.md @@ -19,7 +19,7 @@ Note that the YAML file in this example uses `serveConfigV2`, which is supported ```sh # Download `ray-service.mobilenet.yaml` -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml # Create a RayService kubectl apply -f ray-service.mobilenet.yaml diff --git a/doc/source/cluster/kubernetes/examples/rayjob-batch-inference-example.md b/doc/source/cluster/kubernetes/examples/rayjob-batch-inference-example.md index 4d890349bb4e4..37f91043e0bfc 100644 --- a/doc/source/cluster/kubernetes/examples/rayjob-batch-inference-example.md +++ b/doc/source/cluster/kubernetes/examples/rayjob-batch-inference-example.md @@ -39,12 +39,12 @@ It should be scheduled on the CPU pod. ## Step 2: Submit the RayJob -Create the RayJob custom resource. The RayJob spec is defined in [ray-job.batch-inference.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-job.batch-inference.yaml). +Create the RayJob custom resource. The RayJob spec is defined in [ray-job.batch-inference.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-job.batch-inference.yaml). Download the file with `curl`: ```bash -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-job.batch-inference.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-job.batch-inference.yaml ``` Note that the `RayJob` spec contains a spec for the `RayCluster` that is to be created for the job. For this tutorial, we use a single-node cluster with 4 GPUs. For production use cases, we recommend using a multi-node cluster where the head node does not have GPUs, so that Ray can automatically schedule GPU workloads on worker nodes and they won't interfere with critical Ray processes on the head node. 
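The RayJob-wraps-RayCluster nesting that this example relies on is easiest to see as a skeleton. The following sketch is illustrative only: the field values, image tag, and script path are placeholders, not the contents of `ray-job.batch-inference.yaml`.

```yaml
# Illustrative RayJob skeleton -- placeholder values, not the actual sample.
apiVersion: ray.io/v1
kind: RayJob
metadata:
  name: rayjob-batch-inference
spec:
  entrypoint: python /home/ray/samples/batch_inference.py  # hypothetical script path
  shutdownAfterJobFinishes: true      # tear the cluster down when the job ends
  rayClusterSpec:                     # the RayCluster created for this job
    headGroupSpec:
      rayStartParams: {}
      template:
        spec:
          containers:
            - name: ray-head
              image: rayproject/ray-ml:2.6.3-gpu   # hypothetical tag
              resources:
                limits:
                  nvidia.com/gpu: "4"  # single node with 4 GPUs, as in this tutorial
```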
diff --git a/doc/source/cluster/kubernetes/examples/stable-diffusion-rayservice.md b/doc/source/cluster/kubernetes/examples/stable-diffusion-rayservice.md index a93478e0d6207..6f469d2e9995c 100644 --- a/doc/source/cluster/kubernetes/examples/stable-diffusion-rayservice.md +++ b/doc/source/cluster/kubernetes/examples/stable-diffusion-rayservice.md @@ -18,7 +18,7 @@ Please note that the YAML file in this example uses `serveConfigV2`, which is su ```sh # Step 3.1: Download `ray-service.stable-diffusion.yaml` -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-service.stable-diffusion.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.stable-diffusion.yaml # Step 3.2: Create a RayService kubectl apply -f ray-service.stable-diffusion.yaml diff --git a/doc/source/cluster/kubernetes/examples/text-summarizer-rayservice.md b/doc/source/cluster/kubernetes/examples/text-summarizer-rayservice.md index c0ebcf570cbfd..d0e3c5dbea9a3 100644 --- a/doc/source/cluster/kubernetes/examples/text-summarizer-rayservice.md +++ b/doc/source/cluster/kubernetes/examples/text-summarizer-rayservice.md @@ -17,7 +17,7 @@ Please note that the YAML file in this example uses `serveConfigV2`, which is su ```sh # Step 3.1: Download `ray-service.text-summarizer.yaml` -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-service.text-summarizer.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-service.text-summarizer.yaml # Step 3.2: Create a RayService kubectl apply -f ray-service.text-summarizer.yaml diff --git a/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md b/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md index a2cc182d8c1e6..09d533a55dbeb 100644 --- a/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md +++ b/doc/source/cluster/kubernetes/getting-started/raycluster-quick-start.md @@ -26,8 +26,8 @@ Deploy the KubeRay operator with the [Helm chart repository](https://github.com/ helm repo add kuberay https://ray-project.github.io/kuberay-helm/ helm repo update -# Install both CRDs and KubeRay operator v1.0.0-rc.0. -helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0 +# Install both CRDs and KubeRay operator v1.0.0. +helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 # Confirm that the operator is running in the namespace `default`. kubectl get pods @@ -43,7 +43,7 @@ Once the KubeRay operator is running, we are ready to deploy a RayCluster. 
To do ```sh # Deploy a sample RayCluster CR from the KubeRay Helm chart repo: -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 +helm install raycluster kuberay/ray-cluster --version 1.0.0 # Once the RayCluster CR has been created, you can view it by running: kubectl get rayclusters diff --git a/doc/source/cluster/kubernetes/getting-started/rayjob-quick-start.md b/doc/source/cluster/kubernetes/getting-started/rayjob-quick-start.md index 68dc907c89fa3..601fdab3e3949 100644 --- a/doc/source/cluster/kubernetes/getting-started/rayjob-quick-start.md +++ b/doc/source/cluster/kubernetes/getting-started/rayjob-quick-start.md @@ -54,7 +54,7 @@ Please note that the YAML file in this example uses `serveConfigV2` to specify a ```sh # Step 3.1: Download `ray_v1alpha1_rayjob.yaml` -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray_v1alpha1_rayjob.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray_v1alpha1_rayjob.yaml # Step 3.2: Create a RayJob kubectl apply -f ray_v1alpha1_rayjob.yaml diff --git a/doc/source/cluster/kubernetes/getting-started/rayservice-quick-start.md b/doc/source/cluster/kubernetes/getting-started/rayservice-quick-start.md index 794899851aba1..06252ddf5c146 100644 --- a/doc/source/cluster/kubernetes/getting-started/rayservice-quick-start.md +++ b/doc/source/cluster/kubernetes/getting-started/rayservice-quick-start.md @@ -40,7 +40,7 @@ Please note that the YAML file in this example uses `serveConfigV2` to specify a ```sh # Step 3.1: Download `ray_v1alpha1_rayservice.yaml` -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml # Step 3.2: Create a RayService kubectl apply -f ray_v1alpha1_rayservice.yaml diff --git a/doc/source/cluster/kubernetes/k8s-ecosystem/ingress.md b/doc/source/cluster/kubernetes/k8s-ecosystem/ingress.md index 0a111f823f54e..0d351a6e7b522 100644 --- a/doc/source/cluster/kubernetes/k8s-ecosystem/ingress.md +++ b/doc/source/cluster/kubernetes/k8s-ecosystem/ingress.md @@ -32,10 +32,10 @@ Three examples show how to use ingress to access your Ray cluster: # Step 1: Install KubeRay operator and CRD helm repo add kuberay https://ray-project.github.io/kuberay-helm/ helm repo update -helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0 +helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 # Step 2: Install a RayCluster -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 +helm install raycluster kuberay/ray-cluster --version 1.0.0 # Step 3: Edit the `ray-operator/config/samples/ray-cluster-alb-ingress.yaml` # @@ -122,10 +122,10 @@ Now run the following commands: # Step 1: Install KubeRay operator and CRD helm repo add kuberay https://ray-project.github.io/kuberay-helm/ helm repo update -helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0 +helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 # Step 2: Install a RayCluster -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 +helm install raycluster kuberay/ray-cluster --version 1.0.0 # Step 3: Edit ray-cluster-gclb-ingress.yaml to replace the service name with the name of the head service from the RayCluster. 
(Output of `kubectl get svc`)

@@ -185,12 +185,12 @@ kubectl wait --namespace ingress-nginx \
# Step 3: Install KubeRay operator and CRD
helm repo add kuberay https://ray-project.github.io/kuberay-helm/
helm repo update
-helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0
+helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0

# Step 4: Install RayCluster and create an ingress separately.
# More information about this setting change is documented in https://github.com/ray-project/kuberay/pull/699
# and `ray-operator/config/samples/ray-cluster.separate-ingress.yaml`
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.separate-ingress.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.separate-ingress.yaml
kubectl apply -f ray-cluster.separate-ingress.yaml

# Step 5: Check the ingress created in Step 4.
diff --git a/doc/source/cluster/kubernetes/k8s-ecosystem/kubeflow.md b/doc/source/cluster/kubernetes/k8s-ecosystem/kubeflow.md
index 0278ed5c9195c..895805098c42e 100644
--- a/doc/source/cluster/kubernetes/k8s-ecosystem/kubeflow.md
+++ b/doc/source/cluster/kubernetes/k8s-ecosystem/kubeflow.md
@@ -42,7 +42,7 @@ kustomize version --short
```sh
# Create a RayCluster CR, and the KubeRay operator will reconcile a Ray cluster
# with 1 head Pod and 1 worker Pod.
-helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 --set image.tag=2.2.0-py38-cpu
+helm install raycluster kuberay/ray-cluster --version 1.0.0 --set image.tag=2.2.0-py38-cpu

# Check RayCluster
kubectl get pod -l ray.io/cluster=raycluster-kuberay
diff --git a/doc/source/cluster/kubernetes/k8s-ecosystem/prometheus-grafana.md b/doc/source/cluster/kubernetes/k8s-ecosystem/prometheus-grafana.md
index ac2b497649fa1..28847e41bd4a0 100644
--- a/doc/source/cluster/kubernetes/k8s-ecosystem/prometheus-grafana.md
+++ b/doc/source/cluster/kubernetes/k8s-ecosystem/prometheus-grafana.md
@@ -87,7 +87,7 @@ kubectl get service
* `# HELP`: Describes the meaning of this metric.
* `# TYPE`: See [this document](https://prometheus.io/docs/concepts/metric_types/) for more details.

-* Three required environment variables are defined in [ray-cluster.embed-grafana.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.embed-grafana.yaml). See [Configuring and Managing Ray Dashboard](https://docs.ray.io/en/latest/cluster/configure-manage-dashboard.html) for more details about these environment variables.
+* Three required environment variables are defined in [ray-cluster.embed-grafana.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.embed-grafana.yaml). See [Configuring and Managing Ray Dashboard](https://docs.ray.io/en/latest/cluster/configure-manage-dashboard.html) for more details about these environment variables.
```yaml
env:
  - name: RAY_GRAFANA_IFRAME_HOST
diff --git a/doc/source/cluster/kubernetes/k8s-ecosystem/pyspy.md b/doc/source/cluster/kubernetes/k8s-ecosystem/pyspy.md
index 39cf063b07396..08ca00d2e2c35 100644
--- a/doc/source/cluster/kubernetes/k8s-ecosystem/pyspy.md
+++ b/doc/source/cluster/kubernetes/k8s-ecosystem/pyspy.md
@@ -33,7 +33,7 @@ Follow [this document](kuberay-operator-deploy) to install the latest stable Kub

```bash
# Download `ray-cluster.py-spy.yaml`
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.py-spy.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.py-spy.yaml

# Create a RayCluster
kubectl apply -f ray-cluster.py-spy.yaml
diff --git a/doc/source/cluster/kubernetes/k8s-ecosystem/volcano.md b/doc/source/cluster/kubernetes/k8s-ecosystem/volcano.md
index 8dd116e341a05..e163521301e92 100644
--- a/doc/source/cluster/kubernetes/k8s-ecosystem/volcano.md
+++ b/doc/source/cluster/kubernetes/k8s-ecosystem/volcano.md
@@ -35,7 +35,7 @@ batchScheduler:
* Pass the `--set batchScheduler.enabled=true` flag when running on the command line:
```shell
# Install the Helm chart with the batchScheduler.enabled flag set to true
-helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.0 --set batchScheduler.enabled=true
+helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0 --set batchScheduler.enabled=true
```

### Step 4: Install a RayCluster with the Volcano scheduler
@@ -45,7 +45,7 @@ The RayCluster custom resource must include the `ray.io/scheduler-name: volcano`
```shell
# Path: kuberay/ray-operator/config/samples
# Includes label `ray.io/scheduler-name: volcano` in the metadata.labels
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.volcano-scheduler.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.volcano-scheduler.yaml
kubectl apply -f ray-cluster.volcano-scheduler.yaml

# Check the RayCluster
@@ -113,7 +113,7 @@ Next, create a RayCluster with a head node (1 CPU + 2Gi of RAM) and two workers
```shell
# Path: kuberay/ray-operator/config/samples
# Includes the `ray.io/scheduler-name: volcano` and `volcano.sh/queue-name: kuberay-test-queue` labels in the metadata.labels
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.volcano-scheduler-queue.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.volcano-scheduler-queue.yaml
kubectl apply -f ray-cluster.volcano-scheduler-queue.yaml
```

diff --git a/doc/source/cluster/kubernetes/troubleshooting/rayservice-troubleshooting.md b/doc/source/cluster/kubernetes/troubleshooting/rayservice-troubleshooting.md
index aaa1097ae7919..b34d8d9be3f0b 100644
--- a/doc/source/cluster/kubernetes/troubleshooting/rayservice-troubleshooting.md
+++ b/doc/source/cluster/kubernetes/troubleshooting/rayservice-troubleshooting.md
@@ -124,7 +124,7 @@ Therefore, the YAML file includes `python-multipart` in the runtime environment.
In the [MobileNet example](kuberay-mobilenet-rayservice-example), the [mobilenet.py](https://github.com/ray-project/serve_config_examples/blob/master/mobilenet/mobilenet.py) consists of two functions: `__init__()` and `__call__()`.
The function `__call__()` is only called when the Serve application receives a request.
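The dependency issue above and the `import_path` issue below both trace back to the same few lines of config. A hedged sketch of the relevant `serveConfigV2` fragment follows; the `working_dir` URL is illustrative, and the MobileNet YAML linked in the examples is the authoritative version.

```yaml
# Illustrative serveConfigV2 fragment. import_path resolves as
# <directory>.<python-file>:<app-variable> inside working_dir, and the pip
# entry supplies the dependency that __call__() needs at request time.
serveConfigV2: |
  applications:
    - name: mobilenet
      import_path: mobilenet.mobilenet:app
      runtime_env:
        working_dir: "https://github.com/ray-project/serve_config_examples/archive/master.zip"  # illustrative URL
        pip: ["python-multipart==0.0.6"]
```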
-* Example 1: Remove `python-multipart` from the runtime environment in [the MobileNet YAML](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml).
+* Example 1: Remove `python-multipart` from the runtime environment in [the MobileNet YAML](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml).
  * The `python-multipart` library is only required for the `__call__` method. Therefore, we can only observe the dependency issue when we send a request to the application.
  * Example error message:
    ```bash
@@ -139,7 +139,7 @@ The function `__call__()` is only called when the Serve application receives a r
      AssertionError: The `python-multipart` library must be installed to use form parsing..
    ```

-* Example 2: Update the image from `rayproject/ray-ml:2.5.0` to `rayproject/ray:2.5.0` in [the MobileNet YAML](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml). The latter image does not include `tensorflow`.
+* Example 2: Update the image from `rayproject/ray-ml:2.5.0` to `rayproject/ray:2.5.0` in [the MobileNet YAML](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml). The latter image does not include `tensorflow`.
  * The `tensorflow` library is imported in the [mobilenet.py](https://github.com/ray-project/serve_config_examples/blob/master/mobilenet/mobilenet.py).
  * Example error message:
    ```bash
@@ -162,7 +162,7 @@ The function `__call__()` is only called when the Serve application receives a r

### Issue 4: Incorrect `import_path`.

You can refer to [the documentation](https://docs.ray.io/en/latest/serve/api/doc/ray.serve.schema.ServeApplicationSchema.html#ray.serve.schema.ServeApplicationSchema.import_path) for more details about the format of `import_path`.
-Taking [the MobileNet YAML file](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml) as an example,
+Taking [the MobileNet YAML file](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml) as an example,
the `import_path` is `mobilenet.mobilenet:app`. The first `mobilenet` is the name of the directory in the `working_dir`,
the second `mobilenet` is the name of the Python file in the directory `mobilenet/`,
and `app` is the name of the variable representing the Ray Serve application within the Python file.
diff --git a/doc/source/cluster/kubernetes/user-guides/configuring-autoscaling.md b/doc/source/cluster/kubernetes/user-guides/configuring-autoscaling.md
index 19cda42c75ea8..4079ea7f1f977 100644
--- a/doc/source/cluster/kubernetes/user-guides/configuring-autoscaling.md
+++ b/doc/source/cluster/kubernetes/user-guides/configuring-autoscaling.md
@@ -61,7 +61,7 @@ Follow [this document](kuberay-operator-deploy) to install the latest stable Kub

### Step 3: Create a RayCluster custom resource with autoscaling enabled

```bash
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml
kubectl apply -f ray-cluster.autoscaler.yaml
```

@@ -85,7 +85,7 @@ kubectl get configmaps
```

The RayCluster has one head Pod and zero worker Pods. The head Pod has two containers: a Ray head container and a Ray Autoscaler sidecar container.
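As an aside to the quickstart context above: the sidecar appears because the sample enables in-tree autoscaling. A minimal sketch of the responsible RayCluster fields follows; the values are illustrative, and `ray-cluster.autoscaler.yaml` is the authoritative sample.

```yaml
# Illustrative: the fields that cause KubeRay to inject the autoscaler sidecar.
apiVersion: ray.io/v1
kind: RayCluster
metadata:
  name: raycluster-autoscaler
spec:
  enableInTreeAutoscaling: true   # adds the autoscaler container to the head Pod
  autoscalerOptions:              # optional tuning; values shown are illustrative
    idleTimeoutSeconds: 60        # scale down nodes idle for this long
    upscalingMode: Default
```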
-Additionally, the [ray-cluster.autoscaler.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml) includes a ConfigMap named `ray-example` that houses two Python scripts: `detached_actor.py` and `terminate_detached_actor`.py.
+Additionally, the [ray-cluster.autoscaler.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml) includes a ConfigMap named `ray-example` that houses two Python scripts: `detached_actor.py` and `terminate_detached_actor.py`.

* `detached_actor.py` is a Python script that creates a detached actor which requires 1 CPU.
  ```py
@@ -254,7 +254,7 @@ helm uninstall kuberay-operator

(kuberay-autoscaling-config)=
## KubeRay Autoscaling Configurations

-The [ray-cluster.autoscaler.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml) used in the quickstart example contains detailed comments about the configuration options.
+The [ray-cluster.autoscaler.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.autoscaler.yaml) used in the quickstart example contains detailed comments about the configuration options.
***It's recommended to read this section in conjunction with the YAML file.***

### 1. Enabling autoscaling
diff --git a/doc/source/cluster/kubernetes/user-guides/gke-gcs-bucket.md b/doc/source/cluster/kubernetes/user-guides/gke-gcs-bucket.md
index e0de5fb96c59d..82ca976a03993 100644
--- a/doc/source/cluster/kubernetes/user-guides/gke-gcs-bucket.md
+++ b/doc/source/cluster/kubernetes/user-guides/gke-gcs-bucket.md
@@ -72,7 +72,7 @@ gsutil iam ch serviceAccount:my-iam-sa@my-project-id.iam.gserviceaccount.com:rol
You can download the RayCluster YAML manifest for this tutorial with `curl` as follows:

```bash
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.gke-bucket.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.gke-bucket.yaml
```

The key parts are the following lines:
diff --git a/doc/source/cluster/kubernetes/user-guides/helm-chart-rbac.md b/doc/source/cluster/kubernetes/user-guides/helm-chart-rbac.md
index 468d7bd27ef10..0826f900e1285 100644
--- a/doc/source/cluster/kubernetes/user-guides/helm-chart-rbac.md
+++ b/doc/source/cluster/kubernetes/user-guides/helm-chart-rbac.md
@@ -69,9 +69,9 @@ kubectl get role
#kuberay-operator-leader-election 2023-10-15T04:54:28Z

# Install RayCluster in the `default`, `n1`, and `n2` namespaces.
-helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -n n1 -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -n n2 +helm install raycluster kuberay/ray-cluster --version 1.0.0 +helm install raycluster kuberay/ray-cluster --version 1.0.0 -n n1 +helm install raycluster kuberay/ray-cluster --version 1.0.0 -n n2 # KubeRay only creates a RayCluster in `default`. kubectl get raycluster -A @@ -173,9 +173,9 @@ kubectl get role --all-namespaces | grep kuberay #n2 kuberay-operator 2023-10-15T05:34:27Z # Install RayCluster in the `default`, `n1`, and `n2` namespaces. -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -n n1 -helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 -n n2 +helm install raycluster kuberay/ray-cluster --version 1.0.0 +helm install raycluster kuberay/ray-cluster --version 1.0.0 -n n1 +helm install raycluster kuberay/ray-cluster --version 1.0.0 -n n2 # KubeRay creates a RayCluster only in n1 and n2. kubectl get raycluster -A diff --git a/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md b/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md index af9337044ea47..d857108d4d428 100644 --- a/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md +++ b/doc/source/cluster/kubernetes/user-guides/kuberay-gcs-ft.md @@ -44,7 +44,7 @@ Follow [this document](kuberay-operator-deploy) to install the latest stable Kub ### Step 3: Install a RayCluster with GCS FT enabled ```sh -curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml +curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml kubectl apply -f ray-cluster.external-redis.yaml ``` @@ -65,7 +65,7 @@ kubectl get configmaps # ... ``` -The [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml) file defines Kubernetes resources for RayCluster, Redis, and ConfigMaps. +The [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml) file defines Kubernetes resources for RayCluster, Redis, and ConfigMaps. There are two ConfigMaps in this example: `ray-example` and `redis-config`. The `ray-example` ConfigMap houses two Python scripts: `detached_actor.py` and `increment_counter.py`. @@ -144,7 +144,7 @@ KEYS * HGETALL 864b004c-6305-42e3-ac46-adfa8eb6f752 ``` -In [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the `ray.io/external-storage-namespace` annotation isn't set for the RayCluster. +In [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the `ray.io/external-storage-namespace` annotation isn't set for the RayCluster. Therefore, KubeRay automatically injects the environment variable `RAY_external_storage_namespace` to all Ray Pods managed by the RayCluster with the RayCluster's UID as the external storage namespace by default. See [this section](kuberay-external-storage-namespace) to learn more about the annotation. 
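For clusters that must come back with the same storage namespace after re-creation, the annotation mentioned above can be pinned explicitly instead of relying on the UID default. A hedged sketch follows; the annotation key comes from the surrounding docs, and the value is illustrative.

```yaml
# Illustrative: pin the GCS external storage namespace instead of the UID default.
apiVersion: ray.io/v1
kind: RayCluster
metadata:
  name: raycluster-external-redis
  annotations:
    ray.io/external-storage-namespace: my-raycluster-storage  # surfaces as RAY_external_storage_namespace
```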
@@ -179,7 +179,7 @@ kubectl get pods -l=ray.io/is-ray-node=yes # raycluster-external-redis-worker-small-group-yyyyy 1/1 Running 0 xxm ``` -In [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the `RAY_gcs_rpc_server_reconnect_timeout_s` environment variable isn't set in the specifications for either the head Pod or the worker Pod within the RayCluster. +In [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the `RAY_gcs_rpc_server_reconnect_timeout_s` environment variable isn't set in the specifications for either the head Pod or the worker Pod within the RayCluster. Therefore, KubeRay automatically injects the `RAY_gcs_rpc_server_reconnect_timeout_s` environment variable with the value **600** to the worker Pod and uses the default value **60** for the head Pod. The timeout value for worker Pods must be longer than the timeout value for the head Pod so that the worker Pods don't terminate before the head Pod restarts from a failure. @@ -223,7 +223,7 @@ KEYS * # [Expected output]: (empty list or set) ``` -Starting from KubeRay v1.0.0-rc.0, the KubeRay operator creates a Kubernetes Job to delete the Redis key when a user removes the RayCluster custom resource. +Starting from KubeRay v1.0.0, the KubeRay operator creates a Kubernetes Job to delete the Redis key when a user removes the RayCluster custom resource. To ensure Redis cleanup, the KubeRay operator adds a Kubernetes finalizer to the RayCluster with GCS fault tolerance enabled. KubeRay only removes this finalizer after the Kubernetes Job successfully cleans up Redis. @@ -243,7 +243,7 @@ kind delete cluster ## KubeRay GCS fault tolerance configurations -The [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml) used in the quickstart example contains detailed comments about the configuration options. +The [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml) used in the quickstart example contains detailed comments about the configuration options. ***Read this section in conjunction with the YAML file.*** ### 1. Enable GCS fault tolerance @@ -260,7 +260,7 @@ The [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blo * **`redis-password`** in head's `rayStartParams`: Use this option to specify the password for the Redis service, thus allowing the Ray head to connect to it. -In the [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the RayCluster custom resource uses an environment variable `REDIS_PASSWORD` to store the password from a Kubernetes secret. +In the [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the RayCluster custom resource uses an environment variable `REDIS_PASSWORD` to store the password from a Kubernetes secret. 
```yaml rayStartParams: redis-password: $REDIS_PASSWORD @@ -279,7 +279,7 @@ In the [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/ * **`RAY_REDIS_ADDRESS`** environment variable in head's Pod: Ray reads the `RAY_REDIS_ADDRESS` environment variable to establish a connection with the Redis server. -In the [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the RayCluster custom resource uses the `redis` Kubernetes ClusterIP service name as the connection point to the Redis server. The ClusterIP service is also created by the YAML file. +In the [ray-cluster.external-redis.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.external-redis.yaml), the RayCluster custom resource uses the `redis` Kubernetes ClusterIP service name as the connection point to the Redis server. The ClusterIP service is also created by the YAML file. ```yaml template: spec: diff --git a/doc/source/cluster/kubernetes/user-guides/pod-command.md b/doc/source/cluster/kubernetes/user-guides/pod-command.md index b1f54e9175113..1b0cba49f2978 100644 --- a/doc/source/cluster/kubernetes/user-guides/pod-command.md +++ b/doc/source/cluster/kubernetes/user-guides/pod-command.md @@ -45,7 +45,7 @@ Currently, for timing (1), we can set the container's `Command` and `Args` in Ra # Prerequisite: There is a KubeRay operator in the Kubernetes cluster. # Download `ray-cluster.head-command.yaml` - curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.head-command.yaml + curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.head-command.yaml # Create a RayCluster kubectl apply -f ray-cluster.head-command.yaml diff --git a/doc/source/cluster/kubernetes/user-guides/rayserve-dev-doc.md b/doc/source/cluster/kubernetes/user-guides/rayserve-dev-doc.md index 3147559df9bc8..0fea88ddce8cf 100644 --- a/doc/source/cluster/kubernetes/user-guides/rayserve-dev-doc.md +++ b/doc/source/cluster/kubernetes/user-guides/rayserve-dev-doc.md @@ -6,7 +6,7 @@ In this tutorial, you will learn how to effectively debug your Ray Serve scripts Many RayService issues are related to the Ray Serve Python scripts, so it is important to ensure the correctness of the scripts before deploying them to a RayService. This tutorial will show you how to develop a Ray Serve Python script for a MobileNet image classifier on a RayCluster. You can deploy and serve the classifier on your local Kind cluster without requiring a GPU. -Refer to [ray-service.mobilenet.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml) and [mobilenet-rayservice.md](kuberay-mobilenet-rayservice-example) for more details. +Refer to [ray-service.mobilenet.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml) and [mobilenet-rayservice.md](kuberay-mobilenet-rayservice-example) for more details. 
# Step 1: Install a KubeRay cluster

@@ -16,7 +16,7 @@ Follow [this document](kuberay-operator-deploy) to install the latest stable Kub
# Step 2: Create a RayCluster CR

```sh
-helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0
+helm install raycluster kuberay/ray-cluster --version 1.0.0
```

# Step 3: Log in to the head Pod
@@ -50,7 +50,7 @@ the second `mobilenet` is the name of the Python file in the directory `mobilene
helm uninstall raycluster

# Install the RayCluster CR with the Ray image `rayproject/ray-ml:${RAY_VERSION}`
-helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.0 --set image.repository=rayproject/ray-ml
+helm install raycluster kuberay/ray-cluster --version 1.0.0 --set image.repository=rayproject/ray-ml
```

The error message in Step 4 indicates that the Ray image `rayproject/ray:${RAY_VERSION}` does not have the TensorFlow package.
@@ -128,4 +128,4 @@ python3 mobilenet_req.py

In the previous steps, we found that the Ray Serve application can be successfully launched using the Ray image `rayproject/ray-ml:${RAY_VERSION}` and the {ref}`runtime environments <runtime-environments>` `python-multipart==0.0.6`.
Therefore, we can create a RayService YAML file with the same Ray image and runtime environment.

-For more details, please refer to [ray-service.mobilenet.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service.mobilenet.yaml) and [mobilenet-rayservice.md](kuberay-mobilenet-rayservice-example).
+For more details, please refer to [ray-service.mobilenet.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service.mobilenet.yaml) and [mobilenet-rayservice.md](kuberay-mobilenet-rayservice-example).
diff --git a/doc/source/cluster/kubernetes/user-guides/rayservice.md b/doc/source/cluster/kubernetes/user-guides/rayservice.md
index 63d4992a1f875..7c80ddd883cb0 100644
--- a/doc/source/cluster/kubernetes/user-guides/rayservice.md
+++ b/doc/source/cluster/kubernetes/user-guides/rayservice.md
@@ -40,7 +40,7 @@ Note that the YAML file in this example uses `serveConfigV2` to specify a multi-

```sh
# Step 3.1: Download `ray_v1alpha1_rayservice.yaml`
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml

# Step 3.2: Create a RayService
kubectl apply -f ray_v1alpha1_rayservice.yaml
@@ -186,7 +186,7 @@ curl -X POST -H 'Content-Type: application/json' rayservice-sample-serve-svc:800
You can update the configurations for the applications by modifying `serveConfigV2` in the RayService config file. Reapplying the modified config with `kubectl apply` reapplies the new configurations to the existing RayCluster instead of creating a new RayCluster.

-Update the price of mangos from `3` to `4` for the fruit stand app in [ray_v1alpha1_rayservice.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml). This change reconfigures the existing MangoStand deployment, and future requests will use the updated Mango price.
+Update the price of mangos from `3` to `4` for the fruit stand app in [ray_v1alpha1_rayservice.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray_v1alpha1_rayservice.yaml). This change reconfigures the existing MangoStand deployment, and future requests will use the updated Mango price.
```sh
# Step 7.1: Update the price of mangos from 3 to 4.
diff --git a/doc/source/cluster/kubernetes/user-guides/tls.md b/doc/source/cluster/kubernetes/user-guides/tls.md
index f0cbb06d7ec59..f241cf6862bd9 100644
--- a/doc/source/cluster/kubernetes/user-guides/tls.md
+++ b/doc/source/cluster/kubernetes/user-guides/tls.md
@@ -36,11 +36,11 @@ This [YouTube video](https://youtu.be/T4Df5_cojAs) is a good start.
your CA private key in a Kubernetes Secret in your production environment.

```sh
-# Install v1.0.0-rc.0 KubeRay operator
+# Install the KubeRay operator v1.0.0
# `ray-cluster.tls.yaml` covers Steps 1 through 3

# Download `ray-cluster.tls.yaml`
-curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.tls.yaml
+curl -LO https://raw.githubusercontent.com/ray-project/kuberay/v1.0.0/ray-operator/config/samples/ray-cluster.tls.yaml

# Create a RayCluster
kubectl apply -f ray-cluster.tls.yaml
@@ -88,11 +88,11 @@ kubectl create secret generic ca-tls --from-file=ca.key --from-file=ca.crt
* `ca.crt`: CA's self-signed certificate

This step is optional because the `ca.key` and `ca.crt` files have
-already been included in the Kubernetes Secret specified in [ray-cluster.tls.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.tls.yaml).
+already been included in the Kubernetes Secret specified in [ray-cluster.tls.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.tls.yaml).

# Step 2: Create separate private key and self-signed certificate for Ray Pods

-In [ray-cluster.tls.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-cluster.tls.yaml), each Ray
+In [ray-cluster.tls.yaml](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-cluster.tls.yaml), each Ray
Pod (both head and workers) generates its own private key file (`tls.key`) and self-signed
certificate file (`tls.crt`) in its init container. We generate separate files for each Pod
because worker Pods do not have deterministic DNS names, and we cannot use the same
diff --git a/doc/source/cluster/kubernetes/user-guides/upgrade-guide.md b/doc/source/cluster/kubernetes/user-guides/upgrade-guide.md
index a24acfd23d397..63aefacc871af 100644
--- a/doc/source/cluster/kubernetes/user-guides/upgrade-guide.md
+++ b/doc/source/cluster/kubernetes/user-guides/upgrade-guide.md
@@ -32,7 +32,7 @@ Upgrading the KubeRay version is the best strategy if you have any issues with K
* Based on [the Helm documentation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations), there is no support at this time for upgrading or deleting CRDs using Helm.
* If you want to install the latest KubeRay release's CRD, you may need to delete the old CRD first.
* Note that deleting the CRD causes a cascading deletion of custom resources. See the [Helm documentation](https://github.com/helm/community/blob/main/hips/hip-0011.md#deleting-crds) for more details.
-  * Example 1: Upgrade KubeRay from v0.6.0 to v1.0.0-rc.1 without deleting the old CRD.
+  * Example 1: Upgrade KubeRay from v0.6.0 to v1.0.0 without deleting the old CRD.
    ```shell
    # Install KubeRay v0.6.0 and CRD v1alpha1
    helm install kuberay-operator kuberay/kuberay-operator --version 0.6.0
@@ -40,21 +40,21 @@
    # The following instruction uninstalls only KubeRay v0.6.0. It does not uninstall CRD v1alpha1.
    helm uninstall kuberay-operator

-    # Install KubeRay v1.0.0-rc.1. Because the CRD already exists, the Helm instruction does not install the new CRD.
-    helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.1
+    # Install KubeRay v1.0.0. Because the CRD already exists, the Helm instruction does not install the new CRD.
+    helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0

    # Check CRD
    kubectl describe crd rayclusters.ray.io | grep v1
    # You can only see "Name: v1alpha1", and cannot see "Name: v1".

-    # Install RayCluster v1.0.0-rc.1 which uses CRD v1.
-    helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.1
+    # Install RayCluster v1.0.0 which uses CRD v1.
+    helm install raycluster kuberay/ray-cluster --version 1.0.0
    # Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest:
    # resource mapping not found for name: "raycluster-kuberay" namespace: "" from "": no
    # matches for kind "RayCluster" in version "ray.io/v1"
    # ensure CRDs are installed first
    ```

-  * Example 2: Upgrade KubeRay from v0.6.0 to v1.0.0-rc.1 with deleting the old CRD.
+  * Example 2: Upgrade KubeRay from v0.6.0 to v1.0.0, deleting the old CRD.
    ```shell
    # Install KubeRay v0.6.0 and CRD v1alpha1
    helm install kuberay-operator kuberay/kuberay-operator --version 0.6.0
@@ -67,13 +67,13 @@
    kubectl delete crd rayjobs.ray.io
    kubectl delete crd rayservices.ray.io

-    # Install KubeRay v1.0.0-rc.1 and new CRD including v1.
-    helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0-rc.1
+    # Install KubeRay v1.0.0 and new CRD including v1.
+    helm install kuberay-operator kuberay/kuberay-operator --version 1.0.0
    # Check CRD
    kubectl describe crd rayclusters.ray.io | grep v1
    # You can see both "Name: v1alpha1" and "Name: v1".

-    # Install RayCluster v1.0.0-rc.1 which uses CRD v1.
-    helm install raycluster kuberay/ray-cluster --version 1.0.0-rc.1
+    # Install RayCluster v1.0.0 which uses CRD v1.
+    helm install raycluster kuberay/ray-cluster --version 1.0.0
    ```

diff --git a/doc/source/serve/production-guide/kubernetes.md b/doc/source/serve/production-guide/kubernetes.md
index 22bfd69330fb7..ae4f24b6c88df 100644
--- a/doc/source/serve/production-guide/kubernetes.md
+++ b/doc/source/serve/production-guide/kubernetes.md
@@ -231,7 +231,7 @@ In most use cases, it is recommended to enable Kubernetes autoscaling to fully u
:::

## Load balancer
-Set up ingress to expose your Serve application with a load balancer. See [this configuration](https://github.com/ray-project/kuberay/blob/v1.0.0-rc.0/ray-operator/config/samples/ray-service-alb-ingress.yaml)
+Set up ingress to expose your Serve application with a load balancer. See [this configuration](https://github.com/ray-project/kuberay/blob/v1.0.0/ray-operator/config/samples/ray-service-alb-ingress.yaml).

:::{note}
- Ray Serve runs an HTTP proxy on every node, allowing you to use `/-/routes` as the endpoint for node health checks.
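One practical use of the `/-/routes` endpoint noted above is as a load-balancer health check. A hedged sketch for an ALB ingress follows; the annotation names assume the AWS Load Balancer Controller, the service name and port are taken from the RayService examples earlier in this patch, and the linked `ray-service-alb-ingress.yaml` remains the authoritative sample.

```yaml
# Illustrative ALB ingress pointing health checks at Serve's per-node endpoint.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ray-serve-ingress
  annotations:
    alb.ingress.kubernetes.io/scheme: internet-facing
    alb.ingress.kubernetes.io/healthcheck-path: /-/routes  # Serve HTTP proxy health endpoint
spec:
  ingressClassName: alb
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: rayservice-sample-serve-svc  # from the RayService examples above
                port:
                  number: 8000
```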