-
Notifications
You must be signed in to change notification settings - Fork 279
/
coords.py
3032 lines (2528 loc) · 111 KB
/
coords.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright Iris contributors
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""Definitions of coordinates and other dimensional metadata."""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from collections.abc import Container
import copy
from functools import lru_cache
from itertools import zip_longest
import operator
import zlib
import dask.array as da
import numpy as np
import numpy.ma as ma
from iris._data_manager import DataManager
import iris._lazy_data as _lazy
from iris.common import (
AncillaryVariableMetadata,
BaseMetadata,
CellMeasureMetadata,
CFVariableMixin,
CoordMetadata,
DimCoordMetadata,
metadata_manager_factory,
)
import iris.exceptions
from iris.exceptions import warn_once_at_level
import iris.time
import iris.util
#: The default value for ignore_axis which controls guess_coord_axis' behaviour
DEFAULT_IGNORE_AXIS = False
class _DimensionalMetadata(CFVariableMixin, metaclass=ABCMeta):
    """Superclass for dimensional metadata."""

    # Mode constants identifying the arithmetic operation being performed
    # by __binary_operator__ (add / subtract / multiply / divide, plus the
    # reflected divide used by __rdiv__ / __rtruediv__).
    _MODE_ADD = 1
    _MODE_SUB = 2
    _MODE_MUL = 3
    _MODE_DIV = 4
    _MODE_RDIV = 5
    # Printable symbol for each mode, used when building error messages.
    _MODE_SYMBOL = {
        _MODE_ADD: "+",
        _MODE_SUB: "-",
        _MODE_MUL: "*",
        _MODE_DIV: "/",
        _MODE_RDIV: "/",
    }
    # Used by printout methods : __str__ and __repr__
    # Overridden in subclasses : Coord->'points', Connectivity->'indices'
    _values_array_name = "data"
@abstractmethod
def __init__(
    self,
    values,
    standard_name=None,
    long_name=None,
    var_name=None,
    units=None,
    attributes=None,
):
    """Construct a single dimensional metadata object.

    Parameters
    ----------
    values :
        The values of the dimensional metadata.
    standard_name : optional
        CF standard name of the dimensional metadata.
    long_name : optional
        Descriptive name of the dimensional metadata.
    var_name : optional
        The netCDF variable name for the dimensional metadata.
    units : optional
        The :class:`~cf_units.Unit` of the dimensional metadata's values.
        Can be a string, which will be converted to a Unit object.
    attributes : optional
        A dictionary containing other cf and user-defined attributes.
    """
    # Note: this class includes bounds handling code for convenience, but
    # this can only run within instances which are also Coords, because
    # only they may actually have bounds. This parent class has no
    # bounds-related getter/setter properties, and no bounds keywords in
    # its __init__ or __copy__ methods. The only bounds-related behaviour
    # it provides is a 'has_bounds()' method, which always returns False.
    # Configure the metadata manager.
    # (Skipped when a subclass __init__ has already installed a more
    # specific manager before delegating here.)
    if not hasattr(self, "_metadata_manager"):
        self._metadata_manager = metadata_manager_factory(BaseMetadata)
    # NB. each of the following assignments goes through a CFVariableMixin
    # property setter, so assignment order is kept as-is.
    #: CF standard name of the quantity that the metadata represents.
    self.standard_name = standard_name
    #: Descriptive name of the metadata.
    self.long_name = long_name
    #: The netCDF variable name for the metadata.
    self.var_name = var_name
    #: Unit of the quantity that the metadata represents.
    self.units = units
    #: Other attributes, including user specified attributes that
    #: have no meaning to Iris.
    self.attributes = attributes
    # Set up DataManager attributes and values.
    # (_values_dm must exist before the _values setter runs.)
    self._values_dm = None
    self._values = values
    self._bounds_dm = None  # Only ever set on Coord-derived instances.
def __getitem__(self, keys):
    """Return a new dimensional metadata whose values are obtained by
    conventional array indexing.

    .. note::

        Indexing of a circular coordinate results in a non-circular
        coordinate if the overall shape of the coordinate changes after
        indexing.
    """
    # Note: this method includes bounds handling code, but it only runs
    # within Coord type instances, as only these allow bounds to be set.
    # Index the (possibly lazy) values with the given keys, then copy so
    # the new object never aliases this object's arrays.  The copy does
    # not realise lazy data.
    _, indexed_values = iris.util._slice_data_with_keys(
        self._values_dm.core_data(), keys
    )
    extra_copy_kwargs = {}
    if self.has_bounds():
        # A bounded Coord : index the bounds with the same keys.
        _, indexed_bounds = iris.util._slice_data_with_keys(
            self._bounds_dm.core_data(), keys
        )
        # The copy method of a Coord accepts a 'bounds' keyword.
        extra_copy_kwargs["bounds"] = indexed_bounds.copy()
    # The new metadata is a copy of the old one with replaced content.
    return self.copy(indexed_values.copy(), **extra_copy_kwargs)
def copy(self, values=None):
    """Return a copy of this dimensional metadata object.

    Parameters
    ----------
    values : optional
        An array of values for the new dimensional metadata object.
        This may be a different shape to the original values array being
        copied.
    """
    # Note: this is overridden in Coord subclasses, to add bounds handling
    # and a 'bounds' keyword.
    duplicate = copy.deepcopy(self)
    if values is not None:
        # Drop the deep-copied DataManager, then install the replacement
        # values via the normal setter (which builds a fresh manager).
        duplicate._values_dm = None
        duplicate._values = values
    return duplicate
@abstractmethod
def cube_dims(self, cube):
    """Identify the cube dims of any _DimensionalMetadata object.

    Return the dimensions in the cube of a matching _DimensionalMetadata
    object, if any.
    Equivalent to cube.coord_dims(self) for a Coord,
    or cube.cell_measure_dims for a CellMeasure, and so on.
    Simplifies generic code to handle any _DimensionalMetadata objects.
    """
    # Only makes sense for specific subclasses.
    raise NotImplementedError()
def _sanitise_array(self, src, ndmin):
    # Return ``src`` as an array of at least ``ndmin`` dimensions:
    # lazy arrays are reshaped only; real arrays are made writeable and
    # returned as a private view.
    if _lazy.is_lazy_data(src):
        # Lazy data : just ensure the ndmin requirement.
        n_missing = ndmin - src.ndim
        if n_missing <= 0:
            return src
        # Pad with leading length-1 dimensions.
        return src.reshape((1,) * n_missing + src.shape)
    # Real data : a few more things to do in this case.
    # Ensure the array is writeable.
    # NB. np.require returns the *same object* if src is already writeable.
    writeable = np.require(src, requirements="W")
    # Ensure the array has enough dimensions, preserving masked-ness.
    # NB. returns the *same object* if writeable.ndim >= ndmin.
    as_array = ma.array if ma.isMaskedArray(writeable) else np.array
    # We don't need to copy the data, but we do need our own view so we
    # can control the shape, etc.
    return as_array(writeable, ndmin=ndmin, copy=False).view()
@property
def _values(self):
    """The _DimensionalMetadata values as a NumPy array."""
    # A view, so the caller cannot re-shape our internal array in place.
    return self._values_dm.data.view()

@_values.setter
def _values(self, values):
    # Set the values to a new array - as long as it's the same shape.
    # Ensure values has an ndmin of 1 and is either a numpy or lazy array.
    # This will avoid Scalar _DimensionalMetadata with values of shape ()
    # rather than the desired (1,).
    values = self._sanitise_array(values, 1)
    # Set or update DataManager.
    if self._values_dm is None:
        # First assignment (from __init__, or after copy() cleared it).
        self._values_dm = DataManager(values)
    else:
        # Reassignment goes through the DataManager's data setter.
        self._values_dm.data = values
def _lazy_values(self):
    """Return a lazy array representing the dimensional metadata values."""
    return self._values_dm.lazy_data()

def _core_values(self):
    """Value array of this dimensional metadata which may be a NumPy array or a dask array."""
    result = self._values_dm.core_data()
    if not _lazy.is_lazy_data(result):
        # Real data : hand out a view, so callers cannot re-shape our
        # internal array in place.
        result = result.view()
    return result

def _has_lazy_values(self):
    """Indicate whether the metadata's values array is a lazy dask array or not."""
    return self._values_dm.has_lazy_data()
def summary(
    self,
    shorten=False,
    max_values=None,
    edgeitems=2,
    linewidth=None,
    precision=None,
    convert_dates=True,
    _section_indices=None,
):
    r"""Make a printable text summary.

    Parameters
    ----------
    shorten : bool, default=False
        If True, produce an abbreviated one-line summary.
        If False, produce a multi-line summary, with embedded newlines.
    max_values : int or None
        If more than this many data values, print truncated data arrays
        instead of full contents.
        If 0, print only the shape.
        The default is 5 if :attr:`shorten`\ =True, or 15 otherwise.
        This overrides ``numpy.get_printoptions['threshold']``\ .
    linewidth : int or None
        Character-width controlling line splitting of array outputs.
        If unset, defaults to ``numpy.get_printoptions['linewidth']``\ .
    edgeitems : int, default=2
        Controls truncated array output.
        Overrides ``numpy.get_printoptions['edgeitems']``\ .
    precision : int or None
        Controls number decimal formatting.
        When :attr:`shorten`\ =True this defaults to 3, in which case it
        overrides ``numpy.get_printoptions()['precision']``\ .
    convert_dates : bool, default=True
        If the units has a calendar, then print array values as date
        strings instead of the actual numbers.

    Returns
    -------
    result : str
        Output text, with embedded newlines when :attr:`shorten`\ =False.

    Notes
    -----
    .. note::
        Arrays are formatted using :meth:`numpy.array2string`. Some aspects
        of the array formatting are controllable in the usual way, via
        :meth:`numpy.printoptions`, but others are overridden as detailed
        above.
        Control of those aspects is still available, but only via the call
        arguments.
    """
    # NOTE: the *private* key "_section_indices" can be set to a dict, to
    # return details of which (line, character) each particular section of
    # the output text begins at.
    # Currently only used by MeshCoord.summary(), which needs this info to
    # modify the result string, for idiosyncratic reasons.

    def array_summary(data, n_max, n_edge, linewidth, precision):
        # Return a text summary of an array.
        # Take account of strings, dates and masked data.
        result = ""
        formatter = None
        if convert_dates and self.units.is_time_reference():
            # Account for dates, if enabled.
            # N.B. a time unit with a long time interval ("months"
            # or "years") cannot be converted to a date using
            # `num2date`, so gracefully fall back to printing
            # values as numbers.
            if not self.units.is_long_time_interval():
                # Otherwise ... replace all with strings.
                if ma.is_masked(data):
                    mask = data.mask
                else:
                    mask = None
                data = np.array(self.units.num2date(data))
                data = data.astype(str)
                # Masked datapoints do not survive num2date.
                if mask is not None:
                    data = np.ma.masked_array(data, mask)
        if ma.is_masked(data):
            # Masks are not handled by np.array2string, whereas
            # MaskedArray.__str__ is using a private method to convert to
            # objects.
            # Our preferred solution is to convert to strings *and* fill
            # with '--'. This is not ideal because numbers will not align
            # with a common numeric format, but there is no *public* logic
            # in numpy to arrange that, so let's not overcomplicate.
            # It happens that array2string *also* does not use a common
            # format (width) for strings, but we fix that below...
            data = data.astype(str).filled("--")
        if data.dtype.kind == "U":
            # Strings : N.B. includes all missing data
            # find the longest.
            length = max(len(str(x)) for x in data.flatten())
            # Pre-apply a common formatting width.
            formatter = {"all": lambda x: str(x).ljust(length)}
        result = np.array2string(
            data,
            separator=", ",
            edgeitems=n_edge,
            threshold=n_max,
            max_line_width=linewidth,
            formatter=formatter,
            precision=precision,
        )
        return result

    units_str = str(self.units)
    if self.units.calendar and not shorten:
        units_str += f", {self.units.calendar} calendar"
    title_str = f"{self.name()} / ({units_str})"
    cls_str = type(self).__name__
    shape_str = str(self.shape)

    # Implement conditional defaults for control args.
    if max_values is None:
        max_values = 5 if shorten else 15
    # FIX: only apply the default when the caller did not supply a
    # precision — previously a caller-given precision could be clobbered.
    if precision is None:
        precision = 3 if shorten else None
    n_indent = 4
    indent = " " * n_indent
    newline_indent = "\n" + indent
    if linewidth is not None:
        given_array_width = linewidth
    else:
        given_array_width = np.get_printoptions()["linewidth"]
    # Allow for the indenting applied in the long-form output.
    using_array_width = given_array_width - n_indent * 2

    # Make a printout of the main data array (or maybe not, if lazy).
    if self._has_lazy_values():
        data_str = "<lazy>"
    elif max_values == 0:
        data_str = "[...]"
    else:
        data_str = array_summary(
            self._values,
            n_max=max_values,
            n_edge=edgeitems,
            linewidth=using_array_width,
            precision=precision,
        )

    # The output under construction, divided into lines for convenience.
    output_lines = [""]

    def add_output(text, section=None):
        # Append output text and record locations of named 'sections'
        if section and _section_indices is not None:
            # defined a named 'section', recording the current line number
            # and character position as its start position
            i_line = len(output_lines) - 1
            i_char = len(output_lines[-1])
            _section_indices[section] = (i_line, i_char)
        # Split the text-to-add into lines
        lines = text.split("\n")
        # Add initial text (before first '\n') to the current line
        output_lines[-1] += lines[0]
        # Add subsequent lines as additional output lines
        for line in lines[1:]:
            output_lines.append(line)  # Add new lines

    if shorten:
        add_output(f"<{cls_str}: ")
        add_output(f"{title_str} ", section="title")
        if data_str != "<lazy>":
            # Flatten to a single line, reducing repeated spaces.
            def flatten_array_str(array_str):
                array_str = array_str.replace("\n", " ")
                array_str = array_str.replace("\t", " ")
                # FIX: collapse runs of *double* spaces to single ones.
                # The previous code replaced " " with " " (a no-op),
                # which looped forever whenever any space was present.
                while "  " in array_str:
                    array_str = array_str.replace("  ", " ")
                return array_str

            data_str = flatten_array_str(data_str)
            # Adjust maximum-width to allow for the title width in the
            # repr form.
            current_line_len = len(output_lines[-1])
            using_array_width = given_array_width - current_line_len
            # Work out whether to include a summary of the data values
            if len(data_str) > using_array_width:
                # Make one more attempt, printing just the *first* point,
                # as this is useful for dates.
                # FIX: removed a duplicated "data_str =" here.
                data_str = array_summary(
                    self._values[:1],
                    n_max=max_values,
                    n_edge=edgeitems,
                    linewidth=using_array_width,
                    precision=precision,
                )
                data_str = flatten_array_str(data_str)
                data_str = data_str[:-1] + ", ...]"
                if len(data_str) > using_array_width:
                    # Data summary is still too long : replace with array
                    # "placeholder" representation.
                    data_str = "[...]"
        if self.has_bounds():
            data_str += "+bounds"
        if self.shape != (1,):
            # Anything non-scalar : show shape as well.
            data_str += f" shape{shape_str}"
        # single-line output in 'shorten' mode
        add_output(f"{data_str}>", section="data")
    else:
        # Long (multi-line) output format.
        add_output(f"{cls_str} : ")
        add_output(f"{title_str}", section="title")

        def reindent_data_string(text, n_indent):
            lines = [line for line in text.split("\n")]
            indent = " " * (n_indent - 1)  # allow 1 for the initial '['
            # Indent all but the *first* line.
            line_1, rest_lines = lines[0], lines[1:]
            rest_lines = ["\n" + indent + line for line in rest_lines]
            result = line_1 + "".join(rest_lines)
            return result

        data_array_str = reindent_data_string(data_str, 2 * n_indent)
        # NOTE: actual section name is variable here : data/points/indices
        data_text = f"{self._values_array_name}: "
        if "\n" in data_array_str:
            # Put initial '[' here, and the rest on subsequent lines
            data_text += "[" + newline_indent + indent + data_array_str[1:]
        else:
            # All on one line
            data_text += data_array_str
        # N.B. indent section and record section start after that
        add_output(newline_indent)
        add_output(data_text, section="data")

        if self.has_bounds():
            # Add a bounds section : basically just like the 'data'.
            if self._bounds_dm.has_lazy_data():
                bounds_array_str = "<lazy>"
            elif max_values == 0:
                bounds_array_str = "[...]"
            else:
                bounds_array_str = array_summary(
                    self._bounds_dm.data,
                    n_max=max_values,
                    n_edge=edgeitems,
                    linewidth=using_array_width,
                    precision=precision,
                )
            # (A no-op for the single-line "<lazy>" / "[...]" cases.)
            bounds_array_str = reindent_data_string(bounds_array_str, 2 * n_indent)
            bounds_text = "bounds: "
            if "\n" in bounds_array_str:
                # Put initial '[' here, and the rest on subsequent lines
                bounds_text += "[" + newline_indent + indent + bounds_array_str[1:]
            else:
                # All on one line
                bounds_text += bounds_array_str
            # N.B. indent section and record section start after that
            add_output(newline_indent)
            add_output(bounds_text, section="bounds")

        if self.has_bounds():
            shape_str += f" bounds{self._bounds_dm.shape}"
        # Add shape section (always)
        add_output(newline_indent)
        add_output(f"shape: {shape_str}", section="shape")
        # Add dtype section (always)
        add_output(newline_indent)
        add_output(f"dtype: {self.dtype}", section="dtype")
        for name in self._metadata_manager._fields:
            if name == "units":
                # This was already included in the header line
                continue
            val = getattr(self, name, None)
            if isinstance(val, Container):
                # Don't print empty containers, like attributes={}
                show = bool(val)
            else:
                # Don't print properties when not present, or set to None,
                # or False.
                # This works OK as long as we are happy to treat all
                # boolean properties as 'off' when False : Which happens to
                # work for all those defined so far.
                show = val is not None and val is not False
            if show:
                if name == "attributes":
                    # Use a multi-line form for this.
                    add_output(newline_indent)
                    add_output("attributes:", section="attributes")
                    max_attname_len = max(len(attr) for attr in val.keys())
                    for attrname, attrval in val.items():
                        attrname = attrname.ljust(max_attname_len)
                        if isinstance(attrval, str):
                            # quote strings
                            attrval = repr(attrval)
                            # and abbreviate really long ones
                            attrval = iris.util.clip_string(attrval)
                        attr_string = f"{attrname} {attrval}"
                        add_output(newline_indent + indent + attr_string)
                else:
                    # add a one-line section for this property
                    # (aka metadata field)
                    add_output(newline_indent)
                    add_output(f"{name}: {val!r}", section=name)

    return "\n".join(output_lines)
def __str__(self):
    # Full multi-line summary.
    return self.summary()

def __repr__(self):
    # Abbreviated one-line summary.
    return self.summary(shorten=True)
def __eq__(self, other):
    # Fast path : identical objects are always equal.
    if other is self:
        return True
    # Note: this method includes bounds handling code, but it only runs
    # within Coord type instances, as only these allow bounds to be set.
    eq = NotImplemented
    # If the other object has a means of getting its definition, then do
    # the comparison, otherwise return a NotImplemented to let Python try
    # to resolve the operator elsewhere.
    if hasattr(other, "metadata"):
        # metadata comparison
        eq = self.metadata == other.metadata
        # data values comparison
        # (NotImplemented is truthy, hence the explicit identity check.)
        if eq and eq is not NotImplemented:
            eq = iris.util.array_equal(
                self._core_values(), other._core_values(), withnans=True
            )
        # Also consider bounds, if we have them.
        # (N.B. though only Coords can ever actually *have* bounds).
        if eq and eq is not NotImplemented:
            if self.has_bounds() and other.has_bounds():
                eq = iris.util.array_equal(
                    self.core_bounds(), other.core_bounds(), withnans=True
                )
            else:
                # Equal only when *neither* operand has bounds.
                eq = not self.has_bounds() and not other.has_bounds()
    return eq
def __ne__(self, other):
    # Delegate to __eq__, passing any NotImplemented through unchanged so
    # Python can try the reflected operator on the other operand.
    eq = self.__eq__(other)
    if eq is NotImplemented:
        return eq
    return not eq
# Must supply __hash__ as Python 3 does not enable it if __eq__ is defined.
# NOTE: Violates "objects which compare equal must have the same hash".
# We ought to remove this, as equality of two dimensional metadata can
# *change*, so they really should not be hashable.
# However, current code needs it, e.g. so we can put them in sets.
# Fixing it will require changing those uses. See #962 and #1772.
def __hash__(self):
    # Identity-based hash (see the caveats above).
    return hash(id(self))
def __binary_operator__(self, other, mode_constant):
    """Perform common code which is called by add, sub, mul and div.

    Mode constant is one of ADD, SUB, MUL, DIV, RDIV.

    .. note::

        The unit is *not* changed when doing scalar operations on a
        metadata object. This means that a metadata object which represents
        "10 meters" when multiplied by a scalar i.e. "1000" would result in
        a metadata object of "10000 meters". An alternative approach could
        be taken to multiply the *unit* by 1000 and the resultant metadata
        object would represent "10 kilometers".
    """
    # Note: this method includes bounds handling code, but it only runs
    # within Coord type instances, as only these allow bounds to be set.
    if isinstance(other, _DimensionalMetadata):
        # Metadata-with-metadata arithmetic is not supported.
        emsg = (
            f"{self.__class__.__name__} "
            f"{self._MODE_SYMBOL[mode_constant]} "
            f"{other.__class__.__name__}"
        )
        raise iris.exceptions.NotYetImplementedError(emsg)
    if not isinstance(other, (int, float, np.number)):
        # Must return NotImplemented to ensure invocation of any
        # associated reflected operator on the "other" operand.
        # See https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
        return NotImplemented
    # Scalar operand : pick the elementwise operation for this mode, then
    # apply it to the values (and to the bounds, when present).
    scalar_ops = {
        self._MODE_ADD: lambda values: values + other,
        self._MODE_SUB: lambda values: values - other,
        self._MODE_MUL: lambda values: values * other,
        self._MODE_DIV: lambda values: values / other,
        self._MODE_RDIV: lambda values: other / values,
    }
    op = scalar_ops[mode_constant]
    result = self.copy(op(self._values_dm.core_data()))
    if self.has_bounds():
        result.bounds = op(self._bounds_dm.core_data())
    return result
def __add__(self, other):
    return self.__binary_operator__(other, self._MODE_ADD)

def __sub__(self, other):
    return self.__binary_operator__(other, self._MODE_SUB)

def __mul__(self, other):
    return self.__binary_operator__(other, self._MODE_MUL)

def __div__(self, other):
    # Python 2 legacy name; Python 3 division uses __truediv__ below.
    return self.__binary_operator__(other, self._MODE_DIV)

def __truediv__(self, other):
    return self.__binary_operator__(other, self._MODE_DIV)

# Scalar addition and multiplication commute, so reuse the forward forms.
__radd__ = __add__

def __rsub__(self, other):
    # other - self == (-self) + other
    return (-self) + other

def __rdiv__(self, other):
    return self.__binary_operator__(other, self._MODE_RDIV)

def __rtruediv__(self, other):
    return self.__binary_operator__(other, self._MODE_RDIV)

__rmul__ = __mul__

def __neg__(self):
    # Negate values (and bounds, when present); units are unchanged.
    values = -self._core_values()
    copy_args = {}
    if self.has_bounds():
        copy_args["bounds"] = -self.core_bounds()
    return self.copy(values, **copy_args)
def convert_units(self, unit):
    """Change the units, converting the values of the metadata."""
    # If the coord has units convert the values in points (and bounds if
    # present).
    # Note: this method includes bounds handling code, but it only runs
    # within Coord type instances, as only these allow bounds to be set.
    if self.units.is_unknown():
        raise iris.exceptions.UnitConversionError(
            "Cannot convert from unknown units. "
            'The "units" attribute may be set directly.'
        )
    # Set up a delayed conversion for use if either values or bounds (if
    # present) are lazy.
    # Make fixed copies of old + new units for a delayed conversion.
    # (Captured *before* self.units is reassigned below, so the lazy
    # callback converts from the original unit.)
    old_unit = self.units
    new_unit = unit

    # Define a delayed conversion operation (i.e. a callback).
    def pointwise_convert(values):
        return old_unit.convert(values, new_unit)

    if self._has_lazy_values():
        # Keep the values lazy : wrap the conversion in the dask graph.
        new_values = _lazy.lazy_elementwise(self._lazy_values(), pointwise_convert)
    else:
        new_values = self.units.convert(self._values, unit)
    self._values = new_values
    if self.has_bounds():
        if self.has_lazy_bounds():
            new_bounds = _lazy.lazy_elementwise(
                self.lazy_bounds(), pointwise_convert
            )
        else:
            new_bounds = self.units.convert(self.bounds, unit)
        self.bounds = new_bounds
    # Finally, adopt the new unit.
    self.units = unit
def is_compatible(self, other, ignore=None):
    """Return whether the current dimensional metadata object is compatible with another."""
    # Name and units must agree first.
    if self.name() != other.name() or self.units != other.units:
        return False
    # Compare attribute values over the keys both objects define,
    # optionally excluding the 'ignore' key(s).
    shared_keys = set(self.attributes).intersection(other.attributes)
    if ignore is not None:
        if isinstance(ignore, str):
            ignore = (ignore,)
        shared_keys = shared_keys.difference(ignore)
    # Any shared attribute with differing values makes the pair
    # incompatible (np.any copes with array-valued attributes).
    return all(
        not np.any(self.attributes[key] != other.attributes[key])
        for key in shared_keys
    )
@property
def dtype(self):
    """The NumPy dtype of the current dimensional metadata object, as specified by its values."""
    return self._values_dm.dtype

@property
def ndim(self):
    """Return the number of dimensions of the current dimensional metadata object."""
    return self._values_dm.ndim

def has_bounds(self):
    """Indicate whether the current dimensional metadata object has a bounds array."""
    # Allows for code to handle unbounded dimensional metadata agnostic of
    # whether the metadata is a coordinate or not.
    # Always False here : only Coord subclasses can carry bounds, and they
    # override this method.
    return False

@property
def shape(self):
    """The fundamental shape of the metadata, expressed as a tuple."""
    return self._values_dm.shape
def xml_element(self, doc):
    """Create XML element.

    Create the :class:`xml.dom.minidom.Element` that describes this
    :class:`_DimensionalMetadata`.

    Parameters
    ----------
    doc :
        The parent :class:`xml.dom.minidom.Document`.

    Returns
    -------
    :class:`xml.dom.minidom.Element`
        :class:`xml.dom.minidom.Element` that will describe this
        :class:`_DimensionalMetadata`.
    """
    # Create the XML element as the camelCaseEquivalent of the
    # class name.
    element_name = type(self).__name__
    element_name = element_name[0].lower() + element_name[1:]
    element = doc.createElement(element_name)
    element.setAttribute("id", self._xml_id())
    # Only emit the name attributes which are actually set.
    if self.standard_name:
        element.setAttribute("standard_name", str(self.standard_name))
    if self.long_name:
        element.setAttribute("long_name", str(self.long_name))
    if self.var_name:
        element.setAttribute("var_name", str(self.var_name))
    element.setAttribute("units", repr(self.units))
    # Coord-only content : climatological flag.
    if isinstance(self, Coord):
        if self.climatological:
            element.setAttribute("climatological", str(self.climatological))
    if self.attributes:
        # Emit attributes as a nested element, sorted by key for a
        # stable output ordering.
        attributes_element = doc.createElement("attributes")
        for name in sorted(self.attributes.keys()):
            attribute_element = doc.createElement("attribute")
            attribute_element.setAttribute("name", name)
            attribute_element.setAttribute("value", str(self.attributes[name]))
            attributes_element.appendChild(attribute_element)
        element.appendChild(attributes_element)
    # Coord-only content : coordinate system.
    if isinstance(self, Coord):
        if self.coord_system:
            element.appendChild(self.coord_system.xml_element(doc))
    # Add the values
    element.setAttribute("value_type", str(self._value_type_name()))
    element.setAttribute("shape", str(self.shape))
    # The values are referred to "points" of a coordinate and "data"
    # otherwise.
    if isinstance(self, Coord):
        values_term = "points"
    # TODO: replace with isinstance(self, Connectivity) once Connectivity
    # is re-integrated here (currently in experimental.ugrid).
    elif hasattr(self, "indices"):
        values_term = "indices"
    else:
        values_term = "data"
    element.setAttribute(values_term, self._xml_array_repr(self._values))
    return element
def _xml_id_extra(self, unique_value):
    # Hook for subclasses to fold extra identifying state into the
    # byte string hashed by _xml_id(); the base class adds nothing.
    return unique_value

def _xml_id(self):
    # Returns a consistent, unique string identifier for this coordinate.
    # Build a NUL-delimited byte string from the identifying metadata:
    # standard_name, long_name, units, then each "key:value" attribute
    # pair in sorted-key order.  Unset names contribute an empty field.
    parts = []
    for name in (self.standard_name, self.long_name):
        parts.append(name.encode("utf-8") if name else b"")
    parts.append(str(self.units).encode("utf-8"))
    for key, value in sorted(self.attributes.items()):
        parts.append(f"{key}:{value}".encode("utf-8"))
    unique_value = b"\0".join(parts) + b"\0"
    # Extra modifications to unique_value that are specialised in child
    # classes
    unique_value = self._xml_id_extra(unique_value)
    # Mask to ensure consistency across Python versions & platforms.
    crc = zlib.crc32(unique_value) & 0xFFFFFFFF
    return "%08x" % (crc,)
@staticmethod
def _xml_array_repr(data):
    """Return a compact string representation of ``data`` for XML output."""
    if hasattr(data, "to_xml_attr"):
        # The object supplies its own XML formatting.
        # FIX: previously this called ``data._values.to_xml_attr()``,
        # which contradicts the guard (it tests ``data`` itself) and
        # would raise AttributeError for any guarded object without a
        # ``_values`` attribute - call the method on ``data`` directly.
        result = data.to_xml_attr()
    else:
        result = iris.util.format_array(data)
    return result
def _value_type_name(self):
    """Provide a simple name for the data type of the dimensional metadata values."""
    dtype = self._core_values().dtype
    # Map the two 'string' kinds to friendly names; everything else
    # (numeric etc.) reports the dtype's own name.
    if dtype.kind == "S":
        return "bytes"
    if dtype.kind == "U":
        return "string"
    return dtype.name
class AncillaryVariable(_DimensionalMetadata):
    # A CF ancillary variable : per-point metadata carried alongside a
    # cube's data (thin public wrapper over _DimensionalMetadata).

    def __init__(
        self,
        data,
        standard_name=None,
        long_name=None,
        var_name=None,
        units=None,
        attributes=None,
    ):
        """Construct a single ancillary variable.

        Parameters
        ----------
        data :
            The values of the ancillary variable.
        standard_name : optional
            CF standard name of the ancillary variable.
        long_name : optional
            Descriptive name of the ancillary variable.
        var_name : optional
            The netCDF variable name for the ancillary variable.
        units : optional
            The :class:`~cf_units.Unit` of the ancillary variable's values.
            Can be a string, which will be converted to a Unit object.
        attributes : optional
            A dictionary containing other cf and user-defined attributes.
        """
        # Configure the metadata manager.
        # (Skipped when a subclass __init__ has already installed one.)
        if not hasattr(self, "_metadata_manager"):
            self._metadata_manager = metadata_manager_factory(AncillaryVariableMetadata)
        super().__init__(
            values=data,
            standard_name=standard_name,
            long_name=long_name,
            var_name=var_name,
            units=units,
            attributes=attributes,
        )

    @property
    def data(self):
        """The ancillary variable's values, as a NumPy array."""
        return self._values

    @data.setter
    def data(self, data):
        self._values = data

    def lazy_data(self):
        """Return a lazy array representing the ancillary variable's data.

        Accessing this method will never cause the data values to be loaded.
        Similarly, calling methods on, or indexing, the returned Array
        will not cause the ancillary variable to have loaded data.

        If the data have already been loaded for the ancillary variable, the
        returned Array will be a new lazy array wrapper.

        Returns
        -------
        A lazy array, representing the ancillary variable data array.
        """
        return super()._lazy_values()

    def core_data(self):
        """Return data array at the core of this ancillary variable.

        The data array at the core of this ancillary variable, which may be a
        NumPy array or a dask array.
        """
        return super()._core_values()

    def has_lazy_data(self):
        """Indicate whether the ancillary variable's data array is a lazy dask array or not."""
        return super()._has_lazy_values()

    def cube_dims(self, cube):
        """Return the cube dimensions of this AncillaryVariable.

        Equivalent to "cube.ancillary_variable_dims(self)".
        """
        return cube.ancillary_variable_dims(self)
class CellMeasure(AncillaryVariable):
"""A CF Cell Measure, providing area or volume properties of a cell.
A CF Cell Measure, providing area or volume properties of a cell
where these cannot be inferred from the Coordinates and
Coordinate Reference System.
"""
def __init__(
self,