Skip to content

Commit

Permalink
Merge pull request #846 from lee1043/add_taylor_diagram
Browse files Browse the repository at this point in the history
Taylor Diagram
  • Loading branch information
lee1043 committed May 3, 2022
2 parents fee23b5 + 4b0623e commit 5adf579
Show file tree
Hide file tree
Showing 7 changed files with 431 additions and 31 deletions.
3 changes: 2 additions & 1 deletion pcmdi_metrics/graphics/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# init file
# isort: skip_file
from .share.plot_utils import add_logo, download_archived_results # noqa
from .share.plot_utils import add_logo, download_archived_results, combine_ref_dicts # noqa
from .share.read_json_mean_clim import (
read_mean_clim_json_files,
normalize_by_median,
Expand All @@ -12,3 +12,4 @@
from .portrait_plot.portrait_plot_lib import portrait_plot # noqa
from .portrait_plot.portrait_plot_lib import prepare_data # noqa
from .bar_chart.lib import BarChart # noqa
from .taylor_diagram.taylor_diagram import TaylorDiagram # noqa
271 changes: 245 additions & 26 deletions pcmdi_metrics/graphics/demo/mean_clim_plots_test_model.ipynb

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion pcmdi_metrics/graphics/share/Metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import numpy as np
import pandas as pd

from pcmdi_metrics.graphics import read_mean_clim_json_files
from pcmdi_metrics.graphics import combine_ref_dicts, read_mean_clim_json_files


class Metrics:
Expand Down Expand Up @@ -54,6 +54,7 @@ def __init__(self, files):
self.df_dict,
self.var_list,
self.var_unit_list,
self.var_ref_dict,
self.regions,
self.stats,
) = read_mean_clim_json_files(files)
Expand Down Expand Up @@ -145,6 +146,7 @@ def merge(self, metrics_obj):

result.var_list = list(set(self.var_list + metrics_obj.var_list))
result.var_unit_list = list(set(self.var_unit_list + metrics_obj.var_unit_list))
result.var_ref_dict = combine_ref_dicts(self.var_ref_dict, metrics_obj.var_ref_dict)
result.regions = list(set(self.regions + metrics_obj.regions))
result.stats = list(set(self.stats + metrics_obj.stats))

Expand Down
31 changes: 31 additions & 0 deletions pcmdi_metrics/graphics/share/plot_utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import os
import urllib.request
from collections import defaultdict

import matplotlib.pyplot as plt
import requests
Expand Down Expand Up @@ -74,3 +75,33 @@ def download_archived_results(path, local_dir):
except Exception:
print(path, 'not exist in ', url_head_github_repo)
pass


def combine_ref_dicts(d1, d2):
    """
    Combine two dictionaries for reference datasets of variables, printing a
    warning message if different reference datasets were used for a variable.

    Below code is revised from https://stackoverflow.com/a/5946322

    Parameters
    ----------
    d1: dict
    d2: dict

    Return
    ------
    dd: dict
        Merged dict. A key whose values agree across inputs maps to that
        single value; a key with conflicting values maps to a list of both.
    """
    # Collect the value(s) from both inputs under each key
    dd = defaultdict(list)
    for d in (d1, d2):  # you can list as many input dicts as you want here
        for key, value in d.items():
            dd[key].append(value)
    # Collapse keys whose values agree; warn about conflicts
    for key in dd:
        if len(set(dd[key])) == 1:
            dd[key] = dd[key][0]
        else:
            print('Warning: different reference datasets detected for ' + key + ': ', dd[key])
    # Convert outcome to a normal dict and return
    return dict(dd)
16 changes: 13 additions & 3 deletions pcmdi_metrics/graphics/share/read_json_mean_clim.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,33 +24,37 @@ def read_mean_clim_json_files(
(Rows: models, Columns: variables (i.e., 2d array)
- `var_list`: list of string, all variables from JSON files
- `var_unit_list`: list of string, all variables and its units from JSON files
- `var_ref_dict`: dict, default reference dataset for each variable
- `regions`: list of string, regions
- `stats`: list of string, statistics
"""
# Read JSON and get unit for each variable
results_dict = {} # merged dict by reading all JSON files
var_list = []
var_unit_list = []
var_ref_dict = {}
regions_all = []

for json_file in json_list:
if debug:
print("json_file:", json_file)
with open(json_file) as fj:
dict_temp = json.load(fj)
var = dict_temp["Variable"]["id"]
dict_temp = json.load(fj) # e.g., load contents of precipitation json file
var = dict_temp["Variable"]["id"] # e.g., 'pr'
if "level" in list(dict_temp["Variable"].keys()):
var += "-" + str(int(dict_temp["Variable"]["level"] / 100.0)) # Pa to hPa
results_dict[var] = dict_temp
unit = extract_unit(var, results_dict[var])
var_unit = var + " [" + unit + "]"
var_list.append(var)
var_unit_list.append(var_unit)
var_ref_dict[var] = extract_ref(var, results_dict[var])
regions_all.extend(extract_region(var, results_dict[var]))
if stats is None:
stats = extract_stat(var, results_dict[var])
if debug:
print("var_unit_list:", var_unit_list)
print("var_ref_dict:", var_ref_dict)

if regions is None:
regions = list(set(regions_all)) # Remove duplicates
Expand Down Expand Up @@ -81,7 +85,7 @@ def read_mean_clim_json_files(
results_dict, var_list, region, stat, season, mip, debug
)

return df_dict, var_list, var_unit_list, regions, stats
return df_dict, var_list, var_unit_list, var_ref_dict, regions, stats


def extract_unit(var, results_dict_var):
Expand All @@ -90,6 +94,12 @@ def extract_unit(var, results_dict_var):
return units


def extract_ref(var, results_dict_var):
    """Return the default reference dataset ('source') recorded for *var*.

    Looks up the 'default' entry of the alphabetically first model in the
    RESULTS section. NOTE(review): presumably every model in one JSON file
    shares the same default reference — confirm against the JSON writer.
    """
    first_model = sorted(results_dict_var["RESULTS"])[0]
    return results_dict_var["RESULTS"][first_model]["default"]["source"]


def extract_region(var, results_dict_var):
    """Return the list of regions available for *var* in the results dict."""
    regions, _ = extract_region_stat(var, results_dict_var)
    return regions
Expand Down
1 change: 1 addition & 0 deletions pcmdi_metrics/graphics/taylor_diagram/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# init file
136 changes: 136 additions & 0 deletions pcmdi_metrics/graphics/taylor_diagram/taylor_diagram.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
def TaylorDiagram(
        stddev, corrcoef, refstd, fig, colors,
        normalize=True,
        labels=None, markers=None, markersizes=None, zorders=None,
        ref_label=None, smax=None):
    """Plot a Taylor diagram.

    This code was adapted from the ILAMB code by Nathan Collier found here:
    https://github.com/rubisco-sfa/ILAMB/blob/master/src/ILAMB/Post.py#L80,
    which was revised by Jiwoo Lee to enable more flexible customization of the plot.
    The original code was written by Yannick Copin that can be found here:
    https://gist.github.com/ycopin/3342888

    Parameters
    ----------
    stddev : numpy.ndarray
        an array of standard deviations
    corrcoef : numpy.ndarray
        an array of correlation coefficients
    refstd : float
        the reference standard deviation
    fig : matplotlib figure
        the matplotlib figure
    colors : array
        an array of colors for each element of the input arrays
    normalize : bool, optional
        disable to skip normalization of the standard deviation
    labels : list, optional
        list of text for labels
    markers : list, optional
        list of marker type
    markersizes : list, optional
        list of integer for marker size
    zorders : list, optional
        list of integer for zorder
    ref_label : str, optional
        label for reference data
    smax : int or float, optional
        maximum of axis range for (normalized) standard deviation

    Return
    ------
    fig : matplotlib figure
        the matplotlib figure
    ax : matplotlib axis
        the matplotlib axis
    """
    # Imports are local so the graphics package can be imported without
    # pulling in matplotlib until a plot is actually requested.
    import mpl_toolkits.axisartist.floating_axes as FA
    import mpl_toolkits.axisartist.grid_finder as GF
    import numpy as np
    from matplotlib.projections import PolarAxes

    # define transform: correlation is shown as the polar angle
    tr = PolarAxes.PolarTransform()

    # correlation labels: ticks at 0.0-0.9 plus 0.95 and 0.99,
    # placed at theta = arccos(r)
    rlocs = np.concatenate((np.arange(10) / 10., [0.95, 0.99]))
    tlocs = np.arccos(rlocs)
    gl1 = GF.FixedLocator(tlocs)
    tf1 = GF.DictFormatter(dict(zip(tlocs, map(str, rlocs))))

    # standard deviation axis extent
    if normalize:
        stddev = stddev / refstd
        refstd = 1.
    smin = 0
    if smax is None:
        # leave some headroom above the largest point, never below 2.0
        smax = max(2.0, 1.1 * stddev.max())

    # add the curvilinear grid (quarter circle: 0 <= theta <= pi/2)
    ghelper = FA.GridHelperCurveLinear(tr,
                                       extremes=(0, np.pi / 2, smin, smax),
                                       grid_locator1=gl1,
                                       tick_formatter1=tf1)
    ax = FA.FloatingSubplot(fig, 111, grid_helper=ghelper)
    fig.add_subplot(ax)

    # adjust axes: angular axis labeled with correlation,
    # radial axis with (normalized) standard deviation
    ax.axis["top"].set_axis_direction("bottom")
    ax.axis["top"].toggle(ticklabels=True, label=True)
    ax.axis["top"].major_ticklabels.set_axis_direction("top")
    ax.axis["top"].label.set_axis_direction("top")
    ax.axis["top"].label.set_text("Correlation")
    ax.axis["left"].set_axis_direction("bottom")
    if normalize:
        ax.axis["left"].label.set_text("Normalized standard deviation")
    else:
        ax.axis["left"].label.set_text("Standard deviation")
    ax.axis["right"].set_axis_direction("top")
    ax.axis["right"].toggle(ticklabels=True)
    ax.axis["right"].major_ticklabels.set_axis_direction("left")
    ax.axis["bottom"].set_visible(False)
    ax.grid(True)

    # plot on the underlying polar axes, not the floating frame
    ax = ax.get_aux_axes(tr)

    # Plot data; clip correlations to the valid [-1, 1] range so
    # arccos never receives an out-of-range value
    corrcoef = corrcoef.clip(-1, 1)
    for i in range(len(corrcoef)):
        # customize label
        if labels is not None:
            label = labels[i]
        else:
            label = None
        # customize marker
        if markers is not None:
            marker = markers[i]
        else:
            marker = 'o'
        # customize marker size
        if markersizes is not None:
            ms = markersizes[i]
        else:
            ms = 8
        # customize marker order
        if zorders is not None:
            zorder = zorders[i]
        else:
            zorder = None
        # customize end
        ax.plot(np.arccos(corrcoef[i]), stddev[i], marker, color=colors[i], mew=0, ms=ms, label=label, zorder=zorder)

    # Add reference point (star) and the reference stddev arc
    ax.plot([0], refstd, 'k*', ms=12, mew=0, label=ref_label)
    t = np.linspace(0, np.pi / 2)
    r = np.zeros_like(t) + refstd
    ax.plot(t, r, 'k--')

    # centered RMS-difference contours (law of cosines around the
    # reference point)
    rs, ts = np.meshgrid(np.linspace(smin, smax),
                         np.linspace(0, np.pi / 2))
    rms = np.sqrt(refstd**2 + rs**2 - 2 * refstd * rs * np.cos(ts))
    contours = ax.contour(ts, rs, rms, 5, colors='k', alpha=0.4)
    ax.clabel(contours, fmt='%1.1f')

    return fig, ax

0 comments on commit 5adf579

Please sign in to comment.