dataset.py (forked from ray-project/ray)
import collections
import copy
import html
import itertools
import logging
import sys
import time
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from uuid import uuid4
import numpy as np
import ray
import ray.cloudpickle as pickle
from ray._private.thirdparty.tabulate.tabulate import tabulate
from ray._private.usage import usage_lib
from ray.air.util.data_batch_conversion import BlockFormat
from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray
from ray.data._internal.block_list import BlockList
from ray.data._internal.compute import (
ActorPoolStrategy,
CallableClass,
ComputeStrategy,
TaskPoolStrategy,
)
from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
from ray.data._internal.equalize import _equalize
from ray.data._internal.execution.interfaces import RefBundle
from ray.data._internal.execution.legacy_compat import _block_list_to_bundles
from ray.data._internal.iterator.iterator_impl import DataIteratorImpl
from ray.data._internal.iterator.stream_split_iterator import StreamSplitDataIterator
from ray.data._internal.lazy_block_list import LazyBlockList
from ray.data._internal.logical.operators.all_to_all_operator import (
RandomizeBlocks,
RandomShuffle,
Repartition,
Sort,
)
from ray.data._internal.logical.operators.input_data_operator import InputData
from ray.data._internal.logical.operators.map_operator import (
Filter,
FlatMap,
MapBatches,
MapRows,
)
from ray.data._internal.logical.operators.n_ary_operator import (
Union as UnionLogicalOperator,
)
from ray.data._internal.logical.operators.n_ary_operator import Zip
from ray.data._internal.logical.operators.one_to_one_operator import Limit
from ray.data._internal.logical.operators.write_operator import Write
from ray.data._internal.logical.optimizers import LogicalPlan
from ray.data._internal.pandas_block import PandasBlockSchema
from ray.data._internal.plan import ExecutionPlan, OneToOneStage
from ray.data._internal.planner.filter import generate_filter_fn
from ray.data._internal.planner.flat_map import generate_flat_map_fn
from ray.data._internal.planner.map_batches import generate_map_batches_fn
from ray.data._internal.planner.map_rows import generate_map_rows_fn
from ray.data._internal.planner.write import generate_write_fn
from ray.data._internal.progress_bar import ProgressBar
from ray.data._internal.remote_fn import cached_remote_fn
from ray.data._internal.split import _get_num_rows, _split_at_indices
from ray.data._internal.stage_impl import (
LimitStage,
RandomizeBlocksStage,
RandomShuffleStage,
RepartitionStage,
SortStage,
ZipStage,
)
from ray.data._internal.stats import DatasetStats, DatasetStatsSummary
from ray.data._internal.util import (
ConsumptionAPI,
_estimate_available_parallelism,
_is_local_scheme,
validate_compute,
)
from ray.data.aggregate import AggregateFn, Max, Mean, Min, Std, Sum
from ray.data.block import (
VALID_BATCH_FORMATS,
Block,
BlockAccessor,
BlockMetadata,
BlockPartition,
DataBatch,
T,
U,
UserDefinedFunction,
_apply_strict_mode_batch_format,
_apply_strict_mode_batch_size,
_validate_key_fn,
)
from ray.data.context import (
ESTIMATED_SAFE_MEMORY_FRACTION,
OK_PREFIX,
WARN_PREFIX,
DataContext,
)
from ray.data.datasource import (
BlockWritePathProvider,
CSVDatasource,
Datasource,
DefaultBlockWritePathProvider,
JSONDatasource,
NumpyDatasource,
ParquetDatasource,
ReadTask,
TFRecordDatasource,
WriteResult,
)
from ray.data.datasource.file_based_datasource import (
_unwrap_arrow_serialization_workaround,
_wrap_arrow_serialization_workaround,
)
from ray.data.iterator import DataIterator
from ray.data.random_access_dataset import RandomAccessDataset
from ray.types import ObjectRef
from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
from ray.widgets import Template
from ray.widgets.util import repr_with_fallback
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
if TYPE_CHECKING:
import dask
import mars
import modin
import pandas
import pyarrow
import pyspark
import tensorflow as tf
import torch
import torch.utils.data
from tensorflow_metadata.proto.v0 import schema_pb2
from ray.data._internal.execution.interfaces import Executor, NodeIdStr
from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.grouped_data import GroupedData
logger = logging.getLogger(__name__)
TensorflowFeatureTypeSpec = Union[
"tf.TypeSpec", List["tf.TypeSpec"], Dict[str, "tf.TypeSpec"]
]
TensorFlowTensorBatchType = Union["tf.Tensor", Dict[str, "tf.Tensor"]]
@PublicAPI
class Dataset:
"""A Dataset is a distributed data collection for data loading and processing.
Datasets are distributed pipelines that produce ``ObjectRef[Block]`` outputs,
where each block holds data in Arrow format, representing a shard of the overall
data collection. The block also determines the unit of parallelism. For more
details, see :ref:`Ray Data Internals <dataset_concept>`.
Datasets can be created in multiple ways: from synthetic data via ``range_*()``
APIs, from existing memory data via ``from_*()`` APIs (this creates a subclass
of Dataset called ``MaterializedDataset``), or from external storage
systems such as local disk, S3, HDFS etc. via the ``read_*()`` APIs. The
(potentially processed) Dataset can be saved back to external storage systems
via the ``write_*()`` APIs.
Examples:
.. testcode::
:skipif: True
import ray
# Create dataset from synthetic data.
ds = ray.data.range(1000)
# Create dataset from in-memory data.
ds = ray.data.from_items(
[{"col1": i, "col2": i * 2} for i in range(1000)]
)
# Create dataset from external storage system.
ds = ray.data.read_parquet("s3://bucket/path")
# Save dataset back to external storage system.
ds.write_csv("s3:https://bucket/output")
Dataset has two kinds of operations: transformation, which takes in Dataset
and outputs a new Dataset (e.g. :py:meth:`.map_batches()`); and consumption,
which produces values (not a data stream) as output
(e.g. :meth:`.iter_batches()`).
Dataset transformations are lazy, with execution of the transformations being
triggered by downstream consumption.
Dataset supports parallel processing at scale: transformations such as
:py:meth:`.map_batches()`, aggregations such as
:py:meth:`.min()`/:py:meth:`.max()`/:py:meth:`.mean()`, grouping via
:py:meth:`.groupby()`, shuffling operations such as :py:meth:`.sort()`,
:py:meth:`.random_shuffle()`, and :py:meth:`.repartition()`.
Examples:
>>> import ray
>>> ds = ray.data.range(1000)
>>> # Transform batches (Dict[str, np.ndarray]) with map_batches().
>>> ds.map_batches(lambda batch: {"id": batch["id"] * 2}) # doctest: +ELLIPSIS
MapBatches(<lambda>)
+- Dataset(num_blocks=..., num_rows=1000, schema={id: int64})
>>> # Compute the maximum.
>>> ds.max("id")
999
>>> # Shuffle this dataset randomly.
>>> ds.random_shuffle() # doctest: +ELLIPSIS
RandomShuffle
+- Dataset(num_blocks=..., num_rows=1000, schema={id: int64})
>>> # Sort it back in order.
>>> ds.sort("id") # doctest: +ELLIPSIS
Sort
+- Dataset(num_blocks=..., num_rows=1000, schema={id: int64})
Both unexecuted and materialized Datasets can be passed between Ray tasks and
actors without incurring a copy. Dataset supports conversion to/from several
more featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and is
also compatible with distributed TensorFlow / PyTorch.
"""
def __init__(
self,
plan: ExecutionPlan,
epoch: int,
lazy: bool = True,
logical_plan: Optional[LogicalPlan] = None,
):
"""Construct a Dataset (internal API).
The constructor is not part of the Dataset API. Use the ``ray.data.*``
read methods to construct a dataset.
"""
assert isinstance(plan, ExecutionPlan), type(plan)
usage_lib.record_library_usage("dataset") # Legacy telemetry name.
self._plan = plan
self._uuid = uuid4().hex
self._epoch = epoch
self._lazy = lazy
self._logical_plan = logical_plan
if logical_plan is not None:
self._plan.link_logical_plan(logical_plan)
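# Eager mode: execute the plan immediately rather than deferring to the
# first downstream consumption (the usual lazy path described in the class
# docstring).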
if not lazy:
self._plan.execute(allow_clear_input_blocks=False)
# Handle to currently running executor for this dataset.
self._current_executor: Optional["Executor"] = None
@staticmethod
def copy(
ds: "Dataset", _deep_copy: bool = False, _as: Optional[type] = None
) -> "Dataset":
if not _as:
_as = type(ds)
if _deep_copy:
return _as(ds._plan.deep_copy(), ds._epoch, ds._lazy, ds._logical_plan)
else:
return _as(ds._plan.copy(), ds._epoch, ds._lazy, ds._logical_plan)
def map(
self,
fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]],
*,
compute: Optional[ComputeStrategy] = None,
num_cpus: Optional[float] = None,
num_gpus: Optional[float] = None,
**ray_remote_args,
) -> "Dataset":
"""Apply the given function to each record of this dataset.
Note that mapping individual records can be quite slow. Consider using
`.map_batches()` for performance.
Examples:
>>> import ray
>>> # Transform python objects.
>>> ds = ray.data.range(1000)
>>> # The function goes from record (Dict[str, Any]) to record.
>>> ds.map(lambda record: {"id": record["id"] * 2})
Map
+- Dataset(num_blocks=..., num_rows=1000, schema={id: int64})
>>> # Transform Arrow records.
>>> ds = ray.data.from_items(
... [{"value": i} for i in range(1000)])
>>> ds.map(lambda record: {"v2": record["value"] * 2})
Map
+- Dataset(num_blocks=200, num_rows=1000, schema={value: int64})
>>> # Define a callable class that persists state across
>>> # function invocations for efficiency.
>>> init_model = ... # doctest: +SKIP
>>> class CachedModel:
... def __init__(self):
... self.model = init_model()
... def __call__(self, batch):
... return self.model(batch)
>>> # Apply the transform in parallel on GPUs. Since
>>> # compute=ActorPoolStrategy(size=8) the transform is applied on a
>>> # pool of 8 Ray actors, each allocated 1 GPU by Ray.
>>> ds.map(CachedModel, # doctest: +SKIP
... compute=ray.data.ActorPoolStrategy(size=8),
... num_gpus=1)
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either None (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
num_cpus: The number of CPUs to reserve for each parallel map worker.
num_gpus: The number of GPUs to reserve for each parallel map worker. For
example, specify `num_gpus=1` to request 1 GPU for each parallel map
worker.
ray_remote_args: Additional resource requirements to request from
ray for each map worker.
.. seealso::
:meth:`~Dataset.flat_map`:
Call this method to create new records from existing ones. Unlike
:meth:`~Dataset.map`, a function passed to
:meth:`~Dataset.flat_map` can return multiple records.
:meth:`~Dataset.flat_map` isn't recommended because it's slow; call
:meth:`~Dataset.map_batches` instead.
:meth:`~Dataset.map_batches`
Call this method to transform batches of data. It's faster and more
flexible than :meth:`~Dataset.map` and :meth:`~Dataset.flat_map`.
"""
validate_compute(fn, compute)
transform_fn = generate_map_rows_fn()
if num_cpus is not None:
ray_remote_args["num_cpus"] = num_cpus
if num_gpus is not None:
ray_remote_args["num_gpus"] = num_gpus
plan = self._plan.with_stage(
OneToOneStage(
"Map",
transform_fn,
compute,
ray_remote_args,
fn=fn,
)
)
logical_plan = self._logical_plan
if logical_plan is not None:
map_op = MapRows(
logical_plan.dag,
fn,
compute=compute,
ray_remote_args=ray_remote_args,
)
logical_plan = LogicalPlan(map_op)
return Dataset(plan, self._epoch, self._lazy, logical_plan)
def map_batches(
self,
fn: UserDefinedFunction[DataBatch, DataBatch],
*,
batch_size: Union[int, None, Literal["default"]] = "default",
compute: Optional[ComputeStrategy] = None,
batch_format: Optional[str] = "default",
zero_copy_batch: bool = False,
fn_args: Optional[Iterable[Any]] = None,
fn_kwargs: Optional[Dict[str, Any]] = None,
fn_constructor_args: Optional[Iterable[Any]] = None,
fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
num_cpus: Optional[float] = None,
num_gpus: Optional[float] = None,
**ray_remote_args,
) -> "Dataset":
"""Apply the given function to batches of data.
This applies the ``fn`` in parallel with map tasks, with each task handling
a batch of data (typically Dict[str, np.ndarray] or pd.DataFrame).
To learn more, see the :ref:`Transforming batches user guide <transforming_batches>`.
.. tip::
If ``fn`` does not mutate its input, set ``zero_copy_batch=True`` to elide a
batch copy, which can improve performance and decrease memory utilization.
``fn`` will then receive zero-copy read-only batches.
If ``fn`` mutates its input, you will need to ensure that the batch provided
to ``fn`` is writable by setting ``zero_copy_batch=False`` (default). This
will create an extra, mutable copy of each batch before handing it to
``fn``.
.. note::
The size of the batches provided to ``fn`` may be smaller than the provided
``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent to
a given map task. When ``batch_size`` is specified, each map task is
sent a single block if the block is equal to or larger than ``batch_size``,
and is sent a bundle of blocks up to (but not exceeding)
``batch_size`` if blocks are smaller than ``batch_size``.
Examples:
>>> import numpy as np
>>> import ray
>>> ds = ray.data.from_items([
... {"name": "Luna", "age": 4},
... {"name": "Rory", "age": 14},
... {"name": "Scout", "age": 9},
... ])
>>> ds # doctest: +SKIP
MaterializedDataset(
num_blocks=3,
num_rows=3,
schema={name: string, age: int64}
)
Here ``fn`` returns the same batch type as the input, but your ``fn`` can
also return a different batch type (e.g., pd.DataFrame). Read more about
:ref:`Transforming batches <transforming_batches>`.
>>> from typing import Dict
>>> def map_fn(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
... batch["age_in_dog_years"] = 7 * batch["age"]
... return batch
>>> ds = ds.map_batches(map_fn)
>>> ds
MapBatches(map_fn)
+- Dataset(num_blocks=3, num_rows=3, schema={name: string, age: int64})
:ref:`Actors <actor-guide>` can improve the performance of some workloads.
For example, you can use :ref:`actors <actor-guide>` to load a model once
per worker instead of once per inference.
To transform batches with :ref:`actors <actor-guide>`, pass a callable type
to ``fn`` and specify an :class:`~ray.data.ActorPoolStrategy`.
In the example below, ``CachedModel`` is called on an autoscaling pool of
two to eight :ref:`actors <actor-guide>`, each allocated one GPU by Ray.
>>> init_large_model = ... # doctest: +SKIP
>>> class CachedModel:
... def __init__(self):
... self.model = init_large_model()
... def __call__(self, item):
... return self.model(item)
>>> ds.map_batches( # doctest: +SKIP
... CachedModel, # doctest: +SKIP
... batch_size=256, # doctest: +SKIP
... compute=ray.data.ActorPoolStrategy(min_size=2, max_size=8), # doctest: +SKIP
... num_gpus=1,
... ) # doctest: +SKIP
``fn`` can also be a generator, yielding multiple batches in a single
invocation. This is useful when returning large objects. Instead of
returning a very large output batch, ``fn`` can instead yield the
output batch in chunks.
>>> def map_fn_with_large_output(batch):
... for i in range(3):
... yield {"large_output": np.ones((100, 1000))}
>>> ds = ray.data.from_items([1])
>>> ds = ds.map_batches(map_fn_with_large_output)
>>> ds
MapBatches(map_fn_with_large_output)
+- Dataset(num_blocks=..., num_rows=1, schema={item: int64})
Args:
fn: The function or generator to apply to each record batch, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy. Note ``fn`` must be
pickle-able.
batch_size: The desired number of rows in each batch, or None to use entire
blocks as batches (blocks may contain different numbers of rows).
The actual size of the batch provided to ``fn`` may be smaller than
``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent
to a given map task. Defaults to 4096 when left as ``"default"``.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
batch_format: Specify ``"default"`` to use the default block format
(NumPy), ``"pandas"`` to select ``pandas.DataFrame``, ``"pyarrow"`` to
select ``pyarrow.Table``, or ``"numpy"`` to select
``Dict[str, numpy.ndarray]``, or None to return the underlying block
exactly as is with no additional formatting.
zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only
batches. If this is ``True`` and no copy is required for the
``batch_format`` conversion, the batch is a zero-copy, read-only
view on data in Ray's object store, which can decrease memory
utilization and improve performance. If this is ``False``, the batch
is writable, which will require an extra copy to guarantee.
If ``fn`` mutates its input, this will need to be ``False`` in order to
avoid "assignment destination is read-only" or "buffer source array is
read-only" errors. Default is ``False``.
fn_args: Positional arguments to pass to ``fn`` after the first argument.
These arguments are top-level arguments to the underlying Ray task.
fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are
top-level arguments to the underlying Ray task.
fn_constructor_args: Positional arguments to pass to ``fn``'s constructor.
You can only provide this if ``fn`` is a callable class. These arguments
are top-level arguments in the underlying Ray actor construction task.
fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor.
This can only be provided if ``fn`` is a callable class. These arguments
are top-level arguments in the underlying Ray actor construction task.
num_cpus: The number of CPUs to reserve for each parallel map worker.
num_gpus: The number of GPUs to reserve for each parallel map worker. For
example, specify `num_gpus=1` to request 1 GPU for each parallel map worker.
ray_remote_args: Additional resource requirements to request from
ray for each map worker.
.. seealso::
:meth:`~Dataset.iter_batches`
Call this function to iterate over batches of data.
:meth:`~Dataset.flat_map`:
Call this method to create new records from existing ones. Unlike
:meth:`~Dataset.map`, a function passed to :meth:`~Dataset.flat_map`
can return multiple records.
:meth:`~Dataset.flat_map` isn't recommended because it's slow; call
:meth:`~Dataset.map_batches` instead.
:meth:`~Dataset.map`
Call this method to transform one record at a time.
This method isn't recommended because it's slow; call
:meth:`~Dataset.map_batches` instead.
""" # noqa: E501
if num_cpus is not None:
ray_remote_args["num_cpus"] = num_cpus
if num_gpus is not None:
ray_remote_args["num_gpus"] = num_gpus
batch_format = _apply_strict_mode_batch_format(batch_format)
if batch_format == "native":
logger.warning("The 'native' batch format has been renamed 'default'.")
target_block_size = None
if batch_size is not None and batch_size != "default":
if batch_size < 1:
raise ValueError("Batch size cannot be negative or 0")
# Enable blocks bundling when batch_size is specified by caller.
target_block_size = batch_size
batch_size = _apply_strict_mode_batch_size(
batch_size, use_gpu="num_gpus" in ray_remote_args
)
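# Resolve the "default" sentinel to a concrete batch size (4096, per the
# docstring above); whether GPUs were requested is passed through so the
# helper can apply any GPU-specific handling.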
if batch_format not in VALID_BATCH_FORMATS:
raise ValueError(
f"The batch format must be one of {VALID_BATCH_FORMATS}, got: "
f"{batch_format}"
)
validate_compute(fn, compute)
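# Constructor args only make sense when an actor pool instantiates ``fn``
# as a callable class, so reject any other combination up front.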
if fn_constructor_args is not None or fn_constructor_kwargs is not None:
if compute is None or (
compute != "actors" and not isinstance(compute, ActorPoolStrategy)
):
raise ValueError(
"fn_constructor_args and fn_constructor_kwargs can only be "
"specified if using the actor pool compute strategy, but got: "
f"{compute}"
)
if not isinstance(fn, CallableClass):
raise ValueError(
"fn_constructor_args and fn_constructor_kwargs can only be "
"specified if providing a CallableClass instance for fn, but got: "
f"{fn}"
)
transform_fn = generate_map_batches_fn(
batch_size=batch_size,
batch_format=batch_format,
zero_copy_batch=zero_copy_batch,
)
# TODO(chengsu): pass function name to MapBatches logical operator.
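# If ``fn`` is a bound method of a Preprocessor, name the stage after the
# preprocessor class instead of the generic method name.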
if hasattr(fn, "__self__") and isinstance(
fn.__self__, ray.data.preprocessor.Preprocessor
):
stage_name = fn.__self__.__class__.__name__
else:
stage_name = f'MapBatches({getattr(fn, "__name__", type(fn))})'
stage = OneToOneStage(
stage_name,
transform_fn,
compute,
ray_remote_args,
# TODO(Clark): Add a strict cap here.
target_block_size=target_block_size,
fn=fn,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
fn_constructor_args=fn_constructor_args,
fn_constructor_kwargs=fn_constructor_kwargs,
)
plan = self._plan.with_stage(stage)
logical_plan = self._logical_plan
if logical_plan is not None:
map_batches_op = MapBatches(
logical_plan.dag,
fn,
batch_size=batch_size,
batch_format=batch_format,
zero_copy_batch=zero_copy_batch,
target_block_size=target_block_size,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
fn_constructor_args=fn_constructor_args,
fn_constructor_kwargs=fn_constructor_kwargs,
compute=compute,
ray_remote_args=ray_remote_args,
)
logical_plan = LogicalPlan(map_batches_op)
return Dataset(plan, self._epoch, self._lazy, logical_plan)
def add_column(
self,
col: str,
fn: Callable[["pandas.DataFrame"], "pandas.Series"],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset":
"""Add the given column to the dataset.
This is only supported for datasets convertible to pandas format.
A function generating the new column values given the batch in pandas
format must be specified.
Examples:
>>> import ray
>>> ds = ray.data.range(100)
>>> # Add a new column equal to "id" * 2.
>>> ds = ds.add_column("new_col", lambda df: df["id"] * 2)
>>> # Overwrite the existing "id" column with zeros.
>>> ds = ds.add_column("id", lambda df: 0)
Time complexity: O(dataset size / parallelism)
Args:
col: Name of the column to add. If the name already exists, the
column is overwritten.
fn: Map function generating the column values given a batch of
records in pandas format.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
def process_batch(batch: "pandas.DataFrame") -> "pandas.DataFrame":
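# Assign the values produced by ``fn`` as column ``col``, overwriting any
# existing column of the same name (see the Args note above).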
batch.loc[:, col] = fn(batch)
return batch
if not callable(fn):
raise ValueError("`fn` must be callable, got {}".format(fn))
return self.map_batches(
process_batch,
batch_format="pandas", # TODO(ekl) we should make this configurable.
compute=compute,
zero_copy_batch=False,
**ray_remote_args,
)
def drop_columns(
self,
cols: List[str],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset":
"""Drop one or more columns from the dataset.
Examples:
>>> import ray
>>> ds = ray.data.range(100)
>>> # Add a new column equal to "id" * 2.
>>> ds = ds.add_column("new_col", lambda df: df["id"] * 2)
>>> # Drop the existing "id" column.
>>> ds = ds.drop_columns(["id"])
Time complexity: O(dataset size / parallelism)
Args:
cols: Names of the columns to drop. If any name does not exist,
an exception is raised.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
return self.map_batches(
lambda batch: batch.drop(columns=cols),
batch_format="pandas",
zero_copy_batch=True,
compute=compute,
**ray_remote_args,
)
def select_columns(
self,
cols: List[str],
*,
compute: Union[str, ComputeStrategy] = None,
**ray_remote_args,
) -> "Dataset":
"""Select one or more columns from the dataset.
All columns to select must be present in the schema of the dataset.
Examples:
>>> import ray
>>> # Create a dataset with 3 columns
>>> ds = ray.data.from_items([{"col1": i, "col2": i+1, "col3": i+2}
... for i in range(10)])
>>> # Select only "col1" and "col2" columns.
>>> ds = ds.select_columns(cols=["col1", "col2"])
>>> ds
MapBatches(<lambda>)
+- Dataset(
num_blocks=...,
num_rows=10,
schema={col1: int64, col2: int64, col3: int64}
)
Time complexity: O(dataset size / parallelism)
Args:
cols: Names of the columns to select. If any name is not included in the
dataset schema, an exception is raised.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
""" # noqa: E501
return self.map_batches(
lambda batch: BlockAccessor.for_block(batch).select(columns=cols),
batch_format="pandas",
zero_copy_batch=True,
compute=compute,
**ray_remote_args,
)
def flat_map(
self,
fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]],
*,
compute: Optional[ComputeStrategy] = None,
num_cpus: Optional[float] = None,
num_gpus: Optional[float] = None,
**ray_remote_args,
) -> "Dataset":
"""Apply the given function to each record and then flatten results.
Consider using ``.map_batches()`` for better performance (the batch size can be
altered in map_batches).
Examples:
>>> import ray
>>> ds = ray.data.range(1000)
>>> ds.flat_map(lambda x: [{"id": 1}, {"id": 2}, {"id": 4}])
FlatMap
+- Dataset(num_blocks=..., num_rows=1000, schema={id: int64})
Time complexity: O(dataset size / parallelism)
Args:
fn: The function or generator to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
num_cpus: The number of CPUs to reserve for each parallel map worker.
num_gpus: The number of GPUs to reserve for each parallel map worker. For
example, specify `num_gpus=1` to request 1 GPU for each parallel map
worker.
ray_remote_args: Additional resource requirements to request from
ray for each map worker.
.. seealso::
:meth:`~Dataset.map_batches`
Call this method to transform batches of data. It's faster and more
flexible than :meth:`~Dataset.map` and :meth:`~Dataset.flat_map`.
:meth:`~Dataset.map`
Call this method to transform one record at a time.
This method isn't recommended because it's slow; call
:meth:`~Dataset.map_batches` instead.
"""
validate_compute(fn, compute)
transform_fn = generate_flat_map_fn()
if num_cpus is not None:
ray_remote_args["num_cpus"] = num_cpus
if num_gpus is not None:
ray_remote_args["num_gpus"] = num_gpus
plan = self._plan.with_stage(
OneToOneStage("FlatMap", transform_fn, compute, ray_remote_args, fn=fn)
)
logical_plan = self._logical_plan
if logical_plan is not None:
op = FlatMap(
input_op=logical_plan.dag,
fn=fn,
compute=compute,
ray_remote_args=ray_remote_args,
)
logical_plan = LogicalPlan(op)
return Dataset(plan, self._epoch, self._lazy, logical_plan)
def filter(
self,
fn: UserDefinedFunction[Dict[str, Any], bool],
*,
compute: Union[str, ComputeStrategy] = None,
**ray_remote_args,
) -> "Dataset":
"""Filter out records that do not satisfy the given predicate.
Consider using ``.map_batches()`` for better performance (you can implement
filter by dropping records).
Examples:
>>> import ray
>>> ds = ray.data.range(100)
>>> ds.filter(lambda x: x["id"] % 2 == 0)
Filter
+- Dataset(num_blocks=..., num_rows=100, schema={id: int64})
Time complexity: O(dataset size / parallelism)
Args:
fn: The predicate to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor
pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an
autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
validate_compute(fn, compute)
transform_fn = generate_filter_fn()
plan = self._plan.with_stage(
OneToOneStage("Filter", transform_fn, compute, ray_remote_args, fn=fn)
)
logical_plan = self._logical_plan
if logical_plan is not None:
op = Filter(
input_op=logical_plan.dag,
fn=fn,
compute=compute,
ray_remote_args=ray_remote_args,
)
logical_plan = LogicalPlan(op)
return Dataset(plan, self._epoch, self._lazy, logical_plan)
def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Dataset":
"""Repartition the dataset into exactly this number of blocks.
After repartitioning, all blocks in the returned dataset will have
approximately the same number of rows.
Repartition has two modes:
* ``shuffle=False`` - performs the minimal data movement needed to equalize block sizes
* ``shuffle=True`` - performs a full distributed shuffle
.. image:: /data/images/dataset-shuffle.svg
:align: center
..
https://docs.google.com/drawings/d/132jhE3KXZsf29ho1yUdPrCHB9uheHBWHJhDQMXqIVPA/edit
Examples:
>>> import ray
>>> ds = ray.data.range(100)
>>> # Set the number of output partitions to write to disk.
>>> ds.repartition(10).write_parquet("/tmp/test")
Time complexity: O(dataset size / parallelism)
Args:
num_blocks: The number of blocks.
shuffle: Whether to perform a distributed shuffle during the
repartition. When shuffle is enabled, each output block
contains a subset of data rows from each input block, which
requires all-to-all data movement. When shuffle is disabled,
output blocks are created from adjacent input blocks,
minimizing data movement.
Returns:
The repartitioned dataset.
""" # noqa: E501
plan = self._plan.with_stage(RepartitionStage(num_blocks, shuffle))
logical_plan = self._logical_plan
if logical_plan is not None:
op = Repartition(
logical_plan.dag,
num_outputs=num_blocks,
shuffle=shuffle,
)
logical_plan = LogicalPlan(op)
return Dataset(plan, self._epoch, self._lazy, logical_plan)
def random_shuffle(
self,
*,
seed: Optional[int] = None,
num_blocks: Optional[int] = None,
**ray_remote_args,
) -> "Dataset":
"""Randomly shuffle the elements of this dataset.
.. tip::
``random_shuffle`` can be slow. For better performance, try
`Iterating over batches with shuffling <iterating-over-data#iterating-over-batches-with-shuffling>`_.
Examples:
>>> import ray
>>> ds = ray.data.range(100)
>>> # Shuffle this dataset randomly.
>>> ds.random_shuffle()
RandomShuffle
+- Dataset(num_blocks=..., num_rows=100, schema={id: int64})
>>> # Shuffle this dataset with a fixed random seed.
>>> ds.random_shuffle(seed=12345)
RandomShuffle
+- Dataset(num_blocks=..., num_rows=100, schema={id: int64})
Time complexity: O(dataset size / parallelism)
Args:
seed: Fix the random seed to use, otherwise one is chosen
based on system randomness.
num_blocks: The number of output blocks after the shuffle, or None
to retain the number of blocks.
Returns: