Skip to content

Commit

Permalink
Skip ownership transfer test when using ray client to be compatible w…
Browse files Browse the repository at this point in the history
…ith Ray 2.4.0 (#344)

* skip ownership test when using ray client

Signed-off-by: Zhi Lin <[email protected]>

* test ray 2.4

Signed-off-by: Zhi Lin <[email protected]>

* fix

Signed-off-by: Zhi Lin <[email protected]>

---------

Signed-off-by: Zhi Lin <[email protected]>
  • Loading branch information
kira-lin authored May 26, 2023
1 parent e1753a2 commit 9c094c4
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 4 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/raydp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ jobs:
else
pip install torch
fi
pip install pyarrow==6.0.1 ray[default]==2.1.0 pytest koalas tensorflow tabulate grpcio-tools wget
pip install pyarrow==6.0.1 ray[default]==2.4.0 pytest koalas tensorflow tabulate grpcio-tools wget
pip install "xgboost_ray[default]<=0.1.13"
pip install torchmetrics
HOROVOD_WITH_GLOO=1
Expand Down
11 changes: 9 additions & 2 deletions python/raydp/tests/test_data_owner_transfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,14 @@ def test_fail_without_data_ownership_transfer(ray_cluster):
its owner (e.g. Spark JVM process) has terminated, which is expected.
"""

# skipping this to be compatible with ray 2.4.0
# see issue #343
if not ray.worker.global_worker.connected:
pytest.skip("Skip this test if using ray client")

from raydp.spark.dataset import spark_dataframe_to_ray_dataset

num_executor = 1

num_executor = 1
spark = raydp.init_spark(
app_name = "example",
num_executors = num_executor,
Expand Down Expand Up @@ -84,6 +88,9 @@ def test_data_ownership_transfer(ray_cluster):
This test should be able to execute till the end without crash as expected.
"""

if not ray.worker.global_worker.connected:
pytest.skip("Skip this test if using ray client")

from raydp.spark.dataset import spark_dataframe_to_ray_dataset
import numpy as np

Expand Down
8 changes: 8 additions & 0 deletions python/raydp/tests/test_spark_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,10 @@ def test_spark_driver_and_executor_hostname(spark_on_ray_small):


def test_ray_dataset_roundtrip(spark_on_ray_small):
# skipping this to be compatible with ray 2.4.0
# see issue #343
if not ray.worker.global_worker.connected:
pytest.skip("Skip this test if using ray client")
spark = spark_on_ray_small
spark_df = spark.createDataFrame([(1, "a"), (2, "b"), (3, "c")], ["one", "two"])
rows = [(r.one, r.two) for r in spark_df.take(3)]
Expand All @@ -107,6 +111,10 @@ def test_ray_dataset_roundtrip(spark_on_ray_small):


def test_ray_dataset_to_spark(spark_on_ray_small):
# skipping this to be compatible with ray 2.4.0
# see issue #343
if not ray.worker.global_worker.connected:
pytest.skip("Skip this test if using ray client")
spark = spark_on_ray_small
n = 5
data = {"value": list(range(n))}
Expand Down
1 change: 0 additions & 1 deletion python/raydp/tf/estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,6 @@ def train_func(config):
)
if config["evaluate"]:
eval_dataset = session.get_dataset_shard("evaluate")
eval_dataset.fully_executed()
eval_tf_dataset = eval_dataset.to_tf(
feature_columns=config["feature_columns"],
label_columns=config["label_columns"],
Expand Down

0 comments on commit 9c094c4

Please sign in to comment.