diff --git a/.circleci/config.yml b/.circleci/config.yml index 5b2a4b01c..57dfc536e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,7 +16,7 @@ aliases: - &create_conda_env name: create_conda_env environment: - PKGS: "vcs vcsaddons mesalib matplotlib scipy cia testsrunner 'proj4<5' 'vtk-cdat>8.1' libnetcdf=4.6.2" + PKGS: "'cdms2<3.1.2.2019' vcs vcsaddons mesalib matplotlib scipy cia testsrunner 'proj4<5' 'vtk-cdat>8.1' libnetcdf=4.6.2" CHANNELS: "-c cdat/label/nightly -c conda-forge -c pcmdi" command: | if [ -d ${HOME}/miniconda ]; then @@ -102,9 +102,9 @@ jobs: FFMPEG: "'ffmpeg>4' 'libpng>1.6.34'" steps: - checkout - - restore_cache: - keys: - - macos_py2_2019-06-01 + #- restore_cache: + # keys: + # - macos_py2_2019-06-01 - run: *create_conda_env - save_cache: key: macos_py2_2019-06-01 @@ -126,20 +126,17 @@ jobs: OS: "osx-64" PY_VER: "py3" FFMPEG: "'ffmpeg>4' 'libpng>1.6.34'" - COVERAGE: "-c tests/coverage.json --coverage-from-egg" - COVERAGE_PKGS: "coverage coveralls" steps: - checkout - - restore_cache: - keys: - - macos_py3_2019-06-01 + #- restore_cache: + # keys: + # - macos_py3_2019-06-01 - run: *create_conda_env - save_cache: key: macos_py3_2019-06-01 paths: /Users/distiller/miniconda - run: *setup_pmp - run: *run_pmp_tests - - run: *run_coveralls - store_artifacts: path: tests_html destination: tests_html @@ -157,9 +154,9 @@ jobs: FFMPEG: "ffmpeg" steps: - checkout - - restore_cache: - keys: - - linux_py2_2019-06-01 + #- restore_cache: + # keys: + # - linux_py2_2019-06-01 - run: *create_conda_env - save_cache: key: linux_py2_2019-06-01 @@ -185,9 +182,9 @@ jobs: COVERAGE_PKGS: "coverage coveralls" steps: - checkout - - restore_cache: - keys: - - linux_py3_2019-06-01 + #- restore_cache: + # keys: + # - linux_py3_2019-06-01 - run: *create_conda_env - save_cache: key: linux_py3_2019-06-01 @@ -208,10 +205,8 @@ workflows: version: 2 pcmdi_metrics: jobs: + - macos_pmp_py3 - macos_pmp_py2 - - macos_pmp_py3: - requires: - - macos_pmp_py2 - linux_pmp_py2 - linux_pmp_py3: requires: diff --git a/doc/Diurnal Cycle Diagram.pdf b/doc/Diurnal Cycle Diagram.pdf index cb6fa79b1..e598ffd42 100644 Binary files a/doc/Diurnal Cycle Diagram.pdf and b/doc/Diurnal Cycle Diagram.pdf differ diff --git a/doc/Diurnal Cycle Diagram.pptx b/doc/Diurnal Cycle Diagram.pptx new file mode 100644 index 000000000..3e4a9eeab Binary files /dev/null and b/doc/Diurnal Cycle Diagram.pptx differ diff --git a/doc/jupyter/Demo/Demo_0_download_data.ipynb b/doc/jupyter/Demo/Demo_0_download_data.ipynb index 15a63e60e..41445bbf5 100644 --- a/doc/jupyter/Demo/Demo_0_download_data.ipynb +++ b/doc/jupyter/Demo/Demo_0_download_data.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Download Data\n", + "# Download Data and Prepare Demos\n", "\n", "This Notebook setup the data for the other demos" ] @@ -26,9 +26,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's retrieve these sample files locally, please edit the foloowing to match a good location on your system.\n", + "Now let's retrieve these sample files locally, please edit the followings to match a good location on your system.\n", "\n", - "You will need to edit the subsequent tutorials to match this path" + "You will need to edit the subsequent tutorials to match this path.\n", + "\n", + "`demo_data_directory` is a path where you would like to download our demo data\n", + "`demo_output_directory` is a path where you would like the demos output to be dumped." 
] }, { @@ -37,7 +40,10 @@ "metadata": {}, "outputs": [], "source": [ - "demo_data_directory = \"demo_data\"" + "# This is where you will be downloading the sample_data\n", + "demo_data_directory = \"demo_data\"\n", + "# this line is where your output will be stored\n", + "demo_output_directory = \"demo_output\"" ] }, { @@ -50,7 +56,13 @@ "output_type": "stream", "text": [ "MD5: data_files.txt\n", - "Downloading: 'PCMDIobs2.0/atmos/mon/pr/GPCP-2-3/gn/v20190301//pr_mon_GPCP-2-3_BE_gn_197901-201803.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/PCMDIobs2.0/atmos/mon/pr/GPCP-2-3/gn/v20190301//pr_mon_GPCP-2-3_BE_gn_197901-201803.nc\n" + "Downloading: 'PCMDIobs2.0/atmos/mon/pr/GPCP-2-3/gn/v20190301//pr_mon_GPCP-2-3_BE_gn_197901-201803.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/PCMDIobs2.0/atmos/mon/pr/GPCP-2-3/gn/v20190301//pr_mon_GPCP-2-3_BE_gn_197901-201803.nc\n", + "Downloading: 'pmpobs_v1.0/atm/mo/rlut/CERES/ac/rlut_CERES_000001-000012_ac.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/pmpobs_v1.0/atm/mo/rlut/CERES/ac/rlut_CERES_000001-000012_ac.nc\n", + "Downloading: 'example_data/atm/mo/rlut/ac/CMIP5.historical.ACCESS1-0.r1i1p1.mon.rlut.198101-200512.AC.v20190225.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/example_data/atm/mo/rlut/ac/CMIP5.historical.ACCESS1-0.r1i1p1.mon.rlut.198101-200512.AC.v20190225.nc\n", + "Downloading: 'example_data/atm/mo/rlut/ac/CMIP5.historical.CSIRO-Mk3-6-0.r1i1p1.mon.rlut.198101-200512.AC.v20190225.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/example_data/atm/mo/rlut/ac/CMIP5.historical.CSIRO-Mk3-6-0.r1i1p1.mon.rlut.198101-200512.AC.v20190225.nc\n", + "Downloading: 'example_data/atm/mo/pr/CMIP5.CMIP.historical.CNRM-CERFACS.CNRM-CM5-2.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130401.0000000.0.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/example_data/atm/mo/pr/CMIP5.CMIP.historical.CNRM-CERFACS.CNRM-CM5-2.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130401.0000000.0.nc\n", + "Downloading: 'example_data/atm/mo/pr/CMIP5.CMIP.historical.NSF-DOE-NCAR.CESM1-WACCM.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130314.0000000.0.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/example_data/atm/mo/pr/CMIP5.CMIP.historical.NSF-DOE-NCAR.CESM1-WACCM.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130314.0000000.0.nc\n", + "Downloading: 'example_data/atm/3hr/pr/pr_3hr_IPSL-CM5A-LR_historical_r1i1p1_5x5_1997-1999.nc' from 'https://pcmdiweb.llnl.gov/pss/pmpdata/' in: demo_data/example_data/atm/3hr/pr/pr_3hr_IPSL-CM5A-LR_historical_r1i1p1_5x5_1997-1999.nc\n" ] } ], @@ -60,6 +72,51 @@ "cdat_info.download_sample_data_files(\"data_files.txt\", demo_data_directory)" ] }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Preparing parameter file: basic_param.py\n", + "Preparing parameter file: basic_diurnal_std_daily_mean.py\n", + "Preparing parameter file: basic_diurnal_fourierAllGrid.py\n", + "Preparing parameter file: basic_monsoon_wang_param.py\n", + "Preparing parameter file: basic_diurnal_std_hourly_mean.py\n", + "Preparing parameter file: basic_diurnal_fourier.py\n", + "Preparing parameter file: basic_diurnal_compute_daily_mean.py\n", + "Preparing parameter file: basic_diurnal_composite.py\n", + "Saving User Choices\n" + ] + } + ], + "source": [ + "# this prepares the various parameter files used in the demo notebooks to reflect where you downloaded the data\n", + "import glob\n", + "\n", + "# Dictionary 
for template_files substitutions \n", + "sub_dict = {\n", + " \"INPUT_DIR\": demo_data_directory,\n", + " \"OUTPUT_DIR\": demo_output_directory\n", + "}\n", + "for name in glob.glob(\"*.in\"):\n", + " with open(name) as template_file:\n", + " print(\"Preparing parameter file: {}\".format(name[:-3]))\n", + " template = template_file.read()\n", + " for key in sub_dict:\n", + " template = template.replace(\"${}$\".format(key), sub_dict[key])\n", + " with open(name[:-3], \"w\") as param_file:\n", + " param_file.write(template)\n", + "\n", + "print(\"Saving User Choices\") \n", + "with open(\"user_choices.py\", \"w\") as f:\n", + " print(\"demo_data_directory = '{}'\".format(demo_data_directory), file=f)\n", + " print(\"demo_output_directory = '{}'\".format(demo_output_directory), file=f)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -67,6 +124,13 @@ "You're done!\n", "Please proceed to the next tutorial" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -86,8 +150,12 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" - } + }, + "selected_variables": [], + "vcdat_file_path": "", + "vcdat_loaded_variables": [], + "vcdat_variable_info": {} }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/jupyter/Demo/Demo_1_mean_climate.ipynb b/doc/jupyter/Demo/Demo_1_mean_climate.ipynb index c49010beb..6a548f8d3 100644 --- a/doc/jupyter/Demo/Demo_1_mean_climate.ipynb +++ b/doc/jupyter/Demo/Demo_1_mean_climate.ipynb @@ -10,7 +10,7 @@ "\n", "It is expected that you have downloaded the sample data as demonstrated in [the download notebook](Demo_0_download_data.ipynb)\n", "\n", - "Please edit the path in the following cell to reflect the location on your system where you downloaded the data" + "The following cell reads in the choices you made during the download data step" ] }, { @@ -19,34 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "# This is where you downloaded the sample_data\n", - "demo_data_directory = \"demo_data\"\n", - "# this line is where your output will be stored\n", - "demo_output_directory = \"demo_output\"" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# this prepares the various parameter files used in this demo notebooks to reflect where you downloaded the data\n", - "import glob\n", - "\n", - "# Dictionary for template_files substitutions \n", - "sub_dict = {\n", - " \"INPUT_DIR\": demo_data_directory,\n", - " \"OUTPUT_DIR\": demo_output_directory\n", - "}\n", - "for name in glob.glob(\"*.in\"):\n", - " with open(name) as template_file:\n", - " print(\"Preparing parameter file: {}\".format(name[:-3]))\n", - " template = template_file.read()\n", - " for key in sub_dict:\n", - " template = template.replace(\"${}$\".format(key), sub_dict[key])\n", - " with open(name[:-3], \"w\") as param_file:\n", - " param_file.write(template)" + "from user_choices import demo_data_directory, demo_output_directory" ] }, { @@ -59,9 +32,72 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "import os\n", + "\n", + "#\n", + "# OPTIONS ARE SET BY USER IN THIS FILE AS INDICATED BELOW BY:\n", + "#\n", + "#\n", + "\n", + "# RUN IDENTIFICATION\n", + "# DEFINES A SUBDIRECTORY TO METRICS OUTPUT RESULTS SO MULTIPLE CASES CAN\n", + "# BE COMPARED\n", + "case_id 
= 'basicTest'\n", + "\n", + "# LIST OF MODEL VERSIONS TO BE TESTED - WHICH ARE EXPECTED TO BE PART OF\n", + "# CLIMATOLOGY FILENAME\n", + "test_data_set = ['ACCESS1-0', 'CSIRO-Mk3-6-0']\n", + "\n", + "\n", + "# VARIABLES TO USE\n", + "vars = ['rlut']\n", + "\n", + "\n", + "# Observations to use at the moment \"default\" or \"alternate\"\n", + "reference_data_set = ['all']\n", + "#ext = '.nc'\n", + "\n", + "# INTERPOLATION OPTIONS\n", + "target_grid = '2.5x2.5' # OPTIONS: '2.5x2.5' or an actual cdms2 grid object\n", + "regrid_tool = 'regrid2' # 'regrid2' # OPTIONS: 'regrid2','esmf'\n", + "# OPTIONS: 'linear','conservative', only if tool is esmf\n", + "regrid_method = 'linear'\n", + "regrid_tool_ocn = 'esmf' # OPTIONS: \"regrid2\",\"esmf\"\n", + "# OPTIONS: 'linear','conservative', only if tool is esmf\n", + "regrid_method_ocn = 'linear'\n", + "\n", + "# Templates for climatology files\n", + "# %(param) will subsitute param with values in this file\n", + "filename_template = \"CMIP5.historical.%(model_version).r1i1p1.mon.%(variable).198101-200512.AC.v20190225.nc\"\n", + "\n", + "# filename template for landsea masks ('sftlf')\n", + "sftlf_filename_template = \"sftlf_%(model_version).nc\"\n", + "generate_sftlf = True # if land surface type mask cannot be found, generate one\n", + "\n", + "\n", + "pth = os.path.dirname(__file__)\n", + "# ROOT PATH FOR MODELS CLIMATOLOGIES\n", + "test_data_path = 'demo_data/example_data/atm/mo/rlut/ac/'\n", + "# ROOT PATH FOR OBSERVATIONS\n", + "# Note that atm/mo/%(variable)/ac will be added to this\n", + "reference_data_path = 'demo_data/pmpobs_v1.0'\n", + "\n", + "# DIRECTORY WHERE TO PUT RESULTS\n", + "metrics_output_path = os.path.join(\n", + " 'demo_output',\n", + " \"%(case_id)\")\n", + "\n", + "\n" + ] + } + ], "source": [ "with open(\"basic_param.py\") as f:\n", " print(f.read())" @@ -79,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -88,19 +124,20 @@ "CompletedProcess(args=['mean_climate_driver.py', '-p', 'basic_param.py'], returncode=0)" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from subprocess import run, PIPE\n", - "run(\"mean_climate_driver.py -p basic_param.py\".split())" + "command_line = \"mean_climate_driver.py -p basic_param.py\"\n", + "run(command_line.split())" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -974,14 +1011,14 @@ " ],\n", " \"provenance\": {\n", " \"platform\": {\n", - " \"OS\": \"Darwin\",\n", - " \"Version\": \"17.7.0\",\n", - " \"Name\": \"loki\"\n", + " \"OS\": \"Linux\",\n", + " \"Version\": \"4.15.0-50-generic\",\n", + " \"Name\": \"drdoom\"\n", " },\n", " \"userId\": \"doutriaux1\",\n", " \"osAccess\": false,\n", - " \"commandLine\": \"/Users/doutriaux1/miniconda3/envs/nightly_py3.6/bin/mean_climate_driver.py -p basic_param.py\",\n", - " \"date\": \"2019-04-26 08:06:11\",\n", + " \"commandLine\": \"/home/doutriaux1/miniconda3/envs/jupyter-vcdat/bin/mean_climate_driver.py -p basic_param.py\",\n", + " \"date\": \"2019-07-10 08:17:51\",\n", " \"conda\": {},\n", " \"packages\": {},\n", " \"openGL\": {\n", @@ -1000,6 +1037,13 @@ "with open(os.path.join(demo_output_directory,\"basicTest/rlut_2.5x2.5_regrid2_linear_metrics.json\")) as f:\n", " print(\"JSON OUTPUT:\\n{}\".format(f.read()))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { 
@@ -1018,9 +1062,13 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.7" - } + "version": "3.7.3" + }, + "selected_variables": [], + "vcdat_file_path": "", + "vcdat_loaded_variables": [], + "vcdat_variable_info": {} }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/jupyter/Demo/Demo_2_monsoon_wang.ipynb b/doc/jupyter/Demo/Demo_2_monsoon_wang.ipynb index cff601c12..ce241cd71 100644 --- a/doc/jupyter/Demo/Demo_2_monsoon_wang.ipynb +++ b/doc/jupyter/Demo/Demo_2_monsoon_wang.ipynb @@ -10,65 +10,29 @@ "\n", "It is expected that you have downloaded the sample data as demonstrated in [the download notebook](Demo_0_download_data.ipynb)\n", "\n", - "Please edit the path in the following cell to reflect the location on your system where you downloaded the data" + "The following cell reads in the choices you made during the download data step" ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# This is where you downloaded the sample_data\n", - "demo_data_directory = \"demo_data\"\n", - "# this line is where your output will be stored\n", - "demo_output_directory = \"demo_output\"" + "The PCMDI Median Driver is driven via parameter files reflecting your study and environment\n", + "in his bare minimum" ] }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Preparing parameter file: basic_param.py\n", - "Preparing parameter file: basic_monsoon_wang_param.py\n" - ] - } - ], - "source": [ - "# this prepares the various parameter files used in this demo notebooks to reflect where you downloaded the data\n", - "import glob\n", - "\n", - "# Dictionary for template_files substitutions \n", - "sub_dict = {\n", - " \"INPUT_DIR\": demo_data_directory,\n", - " \"OUTPUT_DIR\": demo_output_directory\n", - "}\n", - "for name in glob.glob(\"*.in\"):\n", - " with open(name) as template_file:\n", - " print(\"Preparing parameter file: {}\".format(name[:-3]))\n", - " template = template_file.read()\n", - " for key in sub_dict:\n", - " template = template.replace(\"${}$\".format(key), sub_dict[key])\n", - " with open(name[:-3], \"w\") as param_file:\n", - " param_file.write(template)" - ] - }, - { - "cell_type": "markdown", + "execution_count": 1, "metadata": {}, + "outputs": [], "source": [ - "The PCMDI Median Driver is driven via parameter files reflecting your study and environment\n", - "in his bare minimum" + "from user_choices import demo_data_directory, demo_output_directory" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -88,7 +52,7 @@ "pth = os.path.dirname(__file__)\n", "# ROOT PATH FOR MODELS CLIMATOLOGIES\n", "#test_data_path = '../../../tests/monsoon/data/pr_1961_1999_MRI-CGCM3_regrid_MODS.nc'\n", - "test_data_path = 'demo_data/example_data/mo/pr/CMIP5.CMIP.historical.%(model).r1i1p1.mon.pr.atmos.glb-2d-gu.v%(version).0000000.0.nc'\n", + "test_data_path = 'demo_data/example_data/atm/mo/pr/CMIP5.CMIP.historical.%(model).r1i1p1.mon.pr.atmos.glb-2d-gu.v%(version).0000000.0.nc'\n", "# ROOT PATH FOR OBSERVATIONS\n", "# Note that atm/mo/%(variable)/ac will be added to this\n", "#reference_data_path = '../../../tests/monsoon/obs/pr_gpcp_79_07_mseas.nc'\n", @@ -97,6 +61,9 @@ "# DIRECTORY WHERE TO PUT RESULTS\n", "results_dir = 'demo_output/monsoon_wang'\n", "\n", + "# Version for each model\n", + 
"version = ['20130401', '20130314']\n", + "\n", "# Threshold\n", "threshold = 2.5\n" ] @@ -119,7 +86,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -128,7 +95,7 @@ "CompletedProcess(args=['mpindex_compute.py', '-p', 'basic_monsoon_wang_param.py'], returncode=0)" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -140,7 +107,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -152,46 +119,46 @@ " \"DISCLAIMER\": \"USER-NOTICE: The results in this file were produced with the PMP v1.1 (https://github.com/PCMDI/pcmdi_metrics). They are for research purposes only. They are subject to ongoing quality control and change as the PMP software advances, interpolation methods are modified, observational data sets are updated, problems with model data are corrected, etc. Use of these results for research (presentation, publications, etc.) should reference: Gleckler, P. J., C. Doutriaux, P. J. Durack, K. E. Taylor, Y. Zhang, and D. N. Williams, E. Mason, and J. Servonnat (2016), A more powerful reality test for climate models, Eos, 97, doi:10.1029/2016EO051663. If any problems are uncovered in using these results please contact the PMP development team at pcmdi-metrics@llnl.gov\\n\",\n", " \"REFERENCE\": \"The statistics in this file are based on Wang, B., Kim, HJ., Kikuchi, K. et al. Clim Dyn (2011) 37: 941. doi:10.1007/s00382-010-0877-0\",\n", " \"RESULTS\": {\n", - " \"xa\": {\n", + " \"CNRM-CERFACS.CNRM-CM5-2\": {\n", " \"AllMW\": {\n", - " \"cor\": \"0.768\",\n", - " \"rmsn\": \"0.703\",\n", - " \"threat_score\": \"0.494\"\n", + " \"cor\": \"0.681\",\n", + " \"rmsn\": \"0.788\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"AllM\": {\n", - " \"cor\": \"0.773\",\n", - " \"rmsn\": \"0.697\",\n", - " \"threat_score\": \"0.494\"\n", + " \"cor\": \"0.685\",\n", + " \"rmsn\": \"0.785\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"NAMM\": {\n", - " \"cor\": \"0.778\",\n", - " \"rmsn\": \"0.742\",\n", - " \"threat_score\": \"0.602\"\n", + " \"cor\": \"0.785\",\n", + " \"rmsn\": \"0.629\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"SAMM\": {\n", - " \"cor\": \"0.850\",\n", - " \"rmsn\": \"0.582\",\n", - " \"threat_score\": \"0.489\"\n", + " \"cor\": \"0.618\",\n", + " \"rmsn\": \"0.941\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"NAFM\": {\n", - " \"cor\": \"0.732\",\n", - " \"rmsn\": \"0.763\",\n", - " \"threat_score\": \"0.454\"\n", + " \"cor\": \"0.759\",\n", + " \"rmsn\": \"0.666\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"SAFM\": {\n", - " \"cor\": \"0.857\",\n", - " \"rmsn\": \"0.574\",\n", - " \"threat_score\": \"0.614\"\n", + " \"cor\": \"0.723\",\n", + " \"rmsn\": \"0.762\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"ASM\": {\n", - " \"cor\": \"0.711\",\n", - " \"rmsn\": \"0.776\",\n", - " \"threat_score\": \"0.450\"\n", + " \"cor\": \"0.737\",\n", + " \"rmsn\": \"0.720\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " },\n", " \"AUSM\": {\n", - " \"cor\": \"0.895\",\n", - " \"rmsn\": \"0.539\",\n", - " \"threat_score\": \"0.607\"\n", + " \"cor\": \"0.745\",\n", + " \"rmsn\": \"0.753\",\n", + " \"threat_score\": \"100000000000000000000.000\"\n", " }\n", " }\n", " },\n", @@ -203,14 +170,14 @@ " ],\n", " \"provenance\": {\n", " 
\"platform\": {\n", - " \"OS\": \"Darwin\",\n", - " \"Version\": \"17.7.0\",\n", - " \"Name\": \"loki\"\n", + " \"OS\": \"Linux\",\n", + " \"Version\": \"4.15.0-50-generic\",\n", + " \"Name\": \"drdoom\"\n", " },\n", " \"userId\": \"doutriaux1\",\n", " \"osAccess\": false,\n", - " \"commandLine\": \"/Users/doutriaux1/miniconda3/envs/nightly_py3.6/bin/mpindex_compute.py -p basic_monsoon_wang_param.py\",\n", - " \"date\": \"2019-04-26 10:08:15\",\n", + " \"commandLine\": \"/home/doutriaux1/miniconda3/envs/jupyter-vcdat/bin/mpindex_compute.py -p basic_monsoon_wang_param.py\",\n", + " \"date\": \"2019-07-10 08:22:23\",\n", " \"conda\": {},\n", " \"packages\": {},\n", " \"openGL\": {\n", @@ -254,9 +221,13 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.7" - } + "version": "3.7.3" + }, + "selected_variables": [], + "vcdat_file_path": "", + "vcdat_loaded_variables": [], + "vcdat_variable_info": {} }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/doc/jupyter/Demo/Demo_3_diurnal_cycle.ipynb b/doc/jupyter/Demo/Demo_3_diurnal_cycle.ipynb new file mode 100644 index 000000000..295572b7e --- /dev/null +++ b/doc/jupyter/Demo/Demo_3_diurnal_cycle.ipynb @@ -0,0 +1,641 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Diurnal Cycle\n", + "\n", + "This notebook aims at inroducing new users on how to use the PCDMI diurnal cycle drivers.\n", + "\n", + "This [diagram](../../Diurnal%20Cycle%20Diagram.pdf) shows how various drivers are linked together\n", + "\n", + "It is expected that you have downloaded the sample data as demonstrated in [the download notebook](Demo_0_download_data.ipynb)\n", + "\n", + "Please edit the path in the following cell to reflect the location on your system where you downloaded the data\n", + "\n", + "For this tutorial we will be using 3 years worth of 3 hourly data resmapled to a 5x5 grid" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from user_choices import demo_data_directory, demo_output_directory" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Like all other drivers in the PCMDI Metrics Package, dirunal cycles rely on parameter input files\n", + "\n", + "Our first driver starts from cmip5 data and compute the daily means\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/nc'\n", + "\n", + "# input directory\n", + "modpath = 'demo_data/example_data/atm/3hr/pr/'\n", + "\n", + "# filenames template\n", + "filename_template = 'pr_3hr_%(model)_%(experiment)_%(realization)_5x5_1997-1999.nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_compute_daily_mean.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now to run this simply call the driver\n", + "```\n", + "computeStdOfDailyMeans.py -p basic_diurnal_compute_daily_mean.py\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, 
+ "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['computeStdOfDailyMeans.py', '-p', 'basic_diurnal_compute_daily_mean.py'], returncode=0)" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from subprocess import run\n", + "run(\"computeStdOfDailyMeans.py -p basic_diurnal_compute_daily_mean.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This generates a netcdf file `pr_IPSL-CM5A-LR_Jul_1997-1999_std_of_dailymeans.nc` which contains the daily standard deviation at each cell\n", + "\n", + "Looking at our diagram the next driver to run is the one computing the mean of the standard deviation from daily means over a region of interest." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/json'\n", + "\n", + "# input directory which is actually the output of previous driver\n", + "modpath = 'demo_output/diurnal/nc'\n", + "\n", + "# filenames template\n", + "filename_template = 'pr_%(model)_Jul_%(firstyear)-%(lastyear)_std_of_dailymeans.nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Latitudes/longitudes to use\n", + "lat1 = -50.\n", + "lat2 = 50.\n", + "lon1 = 0.\n", + "lon2 = 360.\n", + "\n", + "# Name\n", + "region_name = \"TRMM\"\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_std_daily_mean.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now to run this simply call the driver\n", + "```\n", + "std_of_dailymeans.py -p basic_diurnal_std_daily_mean.py\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['std_of_dailymeans.py', '-p', 'basic_diurnal_std_daily_mean.py'], returncode=0)" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from subprocess import run\n", + "run(\"std_of_dailymeans.py -p basic_diurnal_std_daily_mean.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This generates a json file: `pr_Jul_1997_1999_std_of_dailymeans.json`\n", + "\n", + "You coul also append a new region to this json file by overwritting some of our parameters from the command line:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['std_of_dailymeans.py', '-p', 'basic_diurnal_std_daily_mean.py', '--region_name=TROPICS', '--lat1=-30.', '--lat2=30.', '--lon1=0.', '--lon2=360', '--append'], returncode=0)" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"std_of_dailymeans.py -p basic_diurnal_std_daily_mean.py --region_name=TROPICS --lat1=-30. --lat2=30. --lon1=0. 
--lon2=360 --append\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looking again at our diagram we can now start again from the original 3 hourly data, and run the *composite* script" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/nc'\n", + "\n", + "# input directory\n", + "modpath = 'demo_data/example_data/atm/3hr/pr/'\n", + "\n", + "# filenames template\n", + "filename_template = 'pr_3hr_%(model)_%(experiment)_%(realization)_5x5_1997-1999.nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_composite.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['compositeDiurnalStatistics.py', '-p', 'basic_diurnal_composite.py'], returncode=0)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"compositeDiurnalStatistics.py -p basic_diurnal_composite.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This produces 3 output files:\n", + "```\n", + "pr_IPSL-CM5A-LR_Jul_1997-1999_diurnal_avg.nc\n", + "pr_IPSL-CM5A-LR_Jul_1997-1999_diurnal_std.nc\n", + "pr_IPSL-CM5A-LR_LocalSolarTimes.nc\n", + "```\n", + "\n", + "Containing respectively ???, ??? and ???\n", + "\n", + "We can now generate ASCII files for composite diurnal cycle (w/\terror\tbars) at selected grid points using the `fourierDiurnalGridpoints.py` script." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['fourierDiurnalGridpoints.py', '-p', 'basic_diurnal_fourier.py'], returncode=0)" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"fourierDiurnalGridpoints.py -p basic_diurnal_fourier.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This produces an ASCII file: `pr_Jul_1997-1999_fourierDiurnalGridPoints.asc`\n", + "\n", + "Starting again from the composite results, our diagram suggests we now compute the standard deviation of the hourly values" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/json'\n", + "\n", + "# input directory which is actually the output of previous driver\n", + "modpath = 'demo_output/diurnal/nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Latitudes/longitudes to use\n", + "lat1 = -50.\n", + "lat2 = 50.\n", + "lon1 = 0.\n", + "lon2 = 360.\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_std_hourly_mean.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['std_of_hourlyvalues.py', '-p', 'basic_diurnal_std_hourly_mean.py'], returncode=0)" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"std_of_hourlyvalues.py -p basic_diurnal_std_hourly_mean.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This generates the following file:\n", + " `pr_Jul_1997-1999_std_of_hourlymeans.json`\n", + " \n", + "This statistic is used in Trenberth et al. (2017): it captures the day-to-day “intermittency” of hourly values (much larger than the variance of the daily means) and provides “error bars” on the mean diurnal cycle.\n", + "\n", + "\n", + "Going back to the results of *composite*, we can now run `std_of_meandiurnalcycle.py`, which can use the same input parameter file" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/json'\n", + "\n", + "# input directory which is actually the output of previous driver\n", + "modpath = 'demo_output/diurnal/nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Latitudes/longitudes to use\n", + "lat1 = -50.\n", + "lat2 = 50.\n", + "lon1 = 0.\n", + "lon2 = 360.\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_std_hourly_mean.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "code", + "execution_count": 13, +
"metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['std_of_meandiurnalcycle.py', '-p', 'basic_diurnal_std_hourly_mean.py'], returncode=0)" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"std_of_meandiurnalcycle.py -p basic_diurnal_std_hourly_mean.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This generates the following file: `pr_Jul_1997-1999_std_of_meandiurnalcyc.json`\n", + "\n", + "Again starting from the *composite* results let's do the fourier analysis." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# output directory\n", + "results_dir = 'demo_output/diurnal/nc'\n", + "\n", + "# input directory which is actually the output of previous driver\n", + "modpath = 'demo_output/diurnal/nc'\n", + "\n", + "# model to use\n", + "model = 'IPSL-CM5A-LR'\n", + "experiment = 'historical'\n", + "realization = 'r1i1p1'\n", + "\n", + "# Month to use\n", + "month = 7\n", + "\n", + "# Period\n", + "firstyear = 1997 # included\n", + "lastyear = 1999 # included\n", + "\n", + "# Number of workers\n", + "num_workers = 4\n", + "\n" + ] + } + ], + "source": [ + "with open(\"basic_diurnal_fourierAllGrid.py\") as f:\n", + " print(f.read())" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['fourierDiurnalAllGrid.py', '-p', 'basic_diurnal_fourierAllGrid.py'], returncode=0)" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"fourierDiurnalAllGrid.py -p basic_diurnal_fourierAllGrid.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This generates 3 files:\n", + "\n", + "```\n", + "pr_IPSL-CM5A-LR_Jul_1997-1999_tmean.nc\n", + "pr_IPSL-CM5A-LR_Jul_1997-1999_S.nc\n", + "pr_IPSL-CM5A-LR_Jul_1997-1999_tS.nc\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now run the last script: `savg_fourierWrappedInOut.py`" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "CompletedProcess(args=['savg_fourierWrappedInOut.py', '-p', 'basic_diurnal_std_hourly_mean.py'], returncode=0)" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "run(\"savg_fourier.py -p basic_diurnal_std_hourly_mean.py\".split())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This creates the follwoing file:\n", + "\n", + "```\n", + "pr_Jul_1997-1999_savg_DiurnalFourier.json\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + }, + "selected_variables": [], + "vcdat_file_path": "", + "vcdat_loaded_variables": [], + "vcdat_variable_info": {} + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git 
a/doc/jupyter/Demo/basic_diurnal_composite.py.in b/doc/jupyter/Demo/basic_diurnal_composite.py.in new file mode 100644 index 000000000..ba4b2fbe1 --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_composite.py.in @@ -0,0 +1,23 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/nc' + +# input directory +modpath = '$INPUT_DIR$/example_data/atm/3hr/pr/' + +# filenames template +filename_template = 'pr_3hr_%(model)_%(experiment)_%(realization)_5x5_1997-1999.nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/basic_diurnal_compute_daily_mean.py.in b/doc/jupyter/Demo/basic_diurnal_compute_daily_mean.py.in new file mode 100644 index 000000000..ba4b2fbe1 --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_compute_daily_mean.py.in @@ -0,0 +1,23 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/nc' + +# input directory +modpath = '$INPUT_DIR$/example_data/atm/3hr/pr/' + +# filenames template +filename_template = 'pr_3hr_%(model)_%(experiment)_%(realization)_5x5_1997-1999.nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/basic_diurnal_fourier.py.in b/doc/jupyter/Demo/basic_diurnal_fourier.py.in new file mode 100644 index 000000000..961fd5980 --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_fourier.py.in @@ -0,0 +1,33 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/ascii' + +# input directory which is actually the output of previous driver +modpath = '$OUTPUT_DIR$/diurnal/nc' + +# filenames template +filename_template = 'pr_%(model)_Jul_%(firstyear)-%(lastyear)_diurnal_avg.nc' +filename_template_std = 'pr_%(model)_Jul_%(firstyear)-%(lastyear)_diurnal_std.nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Latitudes/longitudes to use +lat1 = -50. +lat2 = 50. +lon1 = 0. +lon2 = 360. 
+ +# Name +region_name = "TRMM" + +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/basic_diurnal_fourierAllGrid.py.in b/doc/jupyter/Demo/basic_diurnal_fourierAllGrid.py.in new file mode 100644 index 000000000..cac37a090 --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_fourierAllGrid.py.in @@ -0,0 +1,20 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/nc' + +# input directory which is actually the output of previous driver +modpath = '$OUTPUT_DIR$/diurnal/nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/basic_diurnal_std_daily_mean.py.in b/doc/jupyter/Demo/basic_diurnal_std_daily_mean.py.in new file mode 100644 index 000000000..7351b216f --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_std_daily_mean.py.in @@ -0,0 +1,32 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/json' + +# input directory which is actually the output of previous driver +modpath = '$OUTPUT_DIR$/diurnal/nc' + +# filenames template +filename_template = 'pr_%(model)_Jul_%(firstyear)-%(lastyear)_std_of_dailymeans.nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Latitudes/longitudes to use +lat1 = -50. +lat2 = 50. +lon1 = 0. +lon2 = 360. + +# Name +region_name = "TRMM" + +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/basic_diurnal_std_hourly_mean.py.in b/doc/jupyter/Demo/basic_diurnal_std_hourly_mean.py.in new file mode 100644 index 000000000..88333bda1 --- /dev/null +++ b/doc/jupyter/Demo/basic_diurnal_std_hourly_mean.py.in @@ -0,0 +1,26 @@ +# output directory +results_dir = '$OUTPUT_DIR$/diurnal/json' + +# input directory which is actually the output of previous driver +modpath = '$OUTPUT_DIR$/diurnal/nc' + +# model to use +model = 'IPSL-CM5A-LR' +experiment = 'historical' +realization = 'r1i1p1' + +# Month to use +month = 7 + +# Period +firstyear = 1997 # included +lastyear = 1999 # included + +# Latitudes/longitudes to use +lat1 = -50. +lat2 = 50. +lon1 = 0. +lon2 = 360. 
+ +# Number of workers +num_workers = 4 diff --git a/doc/jupyter/Demo/data_files.txt b/doc/jupyter/Demo/data_files.txt index e5ed7243a..99e67592c 100644 --- a/doc/jupyter/Demo/data_files.txt +++ b/doc/jupyter/Demo/data_files.txt @@ -5,3 +5,4 @@ bad136e97d7f13dc5af4ac019c8ca9c2 example_data/atm/mo/rlut/ac/CMIP5.historical.A ec61d7bf2f8480f8e7a18af62d61deff example_data/atm/mo/rlut/ac/CMIP5.historical.CSIRO-Mk3-6-0.r1i1p1.mon.rlut.198101-200512.AC.v20190225.nc 9f5938a7750156ef76905f8872fd0aee example_data/atm/mo/pr/CMIP5.CMIP.historical.CNRM-CERFACS.CNRM-CM5-2.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130401.0000000.0.nc 080a39fb6861cb320d29eb5a6dbc9859 example_data/atm/mo/pr/CMIP5.CMIP.historical.NSF-DOE-NCAR.CESM1-WACCM.r1i1p1.mon.pr.atmos.glb-2d-gu.v20130314.0000000.0.nc +eab076619d05c886648f33d507f4a721 example_data/atm/3hr/pr/pr_3hr_IPSL-CM5A-LR_historical_r1i1p1_5x5_1997-1999.nc diff --git a/pcmdi_metrics/diurnal/scripts/compositeDiurnalStatisticsWrapped.py b/pcmdi_metrics/diurnal/scripts/compositeDiurnalStatistics.py similarity index 93% rename from pcmdi_metrics/diurnal/scripts/compositeDiurnalStatisticsWrapped.py rename to pcmdi_metrics/diurnal/scripts/compositeDiurnalStatistics.py index b487a3909..be7d1431c 100755 --- a/pcmdi_metrics/diurnal/scripts/compositeDiurnalStatisticsWrapped.py +++ b/pcmdi_metrics/diurnal/scripts/compositeDiurnalStatistics.py @@ -36,8 +36,12 @@ def compute(params): outunits = 'mm/d' startime = 1.5 # GMT value for starting time-of-day - reverted = template.reverse(os.path.basename(fileName)) - dataname = reverted["model"] + dataname = params.args.model + if dataname is None or dataname.find("*") != -1: + # model not passed or passed as * + reverted = template.reverse(os.path.basename(fileName)) + print("REVERYING", reverted, dataname) + dataname = reverted["model"] if dataname not in args.skip: try: print('Data source:', dataname) @@ -48,10 +52,10 @@ def compute(params): iYear = 0 for year in range(args.firstyear, args.lastyear + 1): print('Year %s:' % year) - startTime = cdtime.comptime(year, month, 1, 1, 30) + startTime = cdtime.comptime(year, month) # Last possible second to get all tpoints finishtime = startTime.add( - 1, cdtime.Month).add(-1.5, cdtime.Hour).add(.1, cdtime.Second) + 1, cdtime.Month).add(-1, cdtime.Minute) print('Reading %s from %s for time interval %s to %s ...' % (varbname, fileName, startTime, finishtime)) # Transient variable stores data for current year's month. tvarb = f(varbname, time=(startTime, finishtime)) @@ -62,16 +66,18 @@ def compute(params): # metadata from first-year file: if year == args.firstyear: tc = tvarb.getTime().asComponentTime() + print("DATA FROM:", tc[0], "to", tc[-1]) day1 = cdtime.comptime(tc[0].year, tc[0].month) + day1 = tc[0] firstday = tvarb( time=( day1, day1.add( - 1, + 1., cdtime.Day), "con")) dimensions = firstday.shape - # print ' Shape = ', dimensions + print(' Shape = ', dimensions) # Number of time points in the selected month for one year N = dimensions[0] nlats = dimensions[1] @@ -101,9 +107,10 @@ def compute(params): for iGMT in range(N): hour = iGMT * deltaH + startime print(' Choosing timepoints with GMT %5.2f ...' 
% hour) + print("days per mo :", dayspermo) # Transient-variable slice: every Nth tpoint gets all of # the current GMT's tpoints for current year: - tvslice[iGMT] = tvarb[iGMT:tvarb.shape[0]:N] + tvslice[iGMT] = tvarb[iGMT::N] concatenation[iGMT, iYear * dayspermo: (iYear + 1) * diff --git a/pcmdi_metrics/diurnal/scripts/computeStdDailyMeansWrapped.py b/pcmdi_metrics/diurnal/scripts/computeStdOfDailyMeans.py similarity index 95% rename from pcmdi_metrics/diurnal/scripts/computeStdDailyMeansWrapped.py rename to pcmdi_metrics/diurnal/scripts/computeStdOfDailyMeans.py index cab47b55d..87261a8fa 100755 --- a/pcmdi_metrics/diurnal/scripts/computeStdDailyMeansWrapped.py +++ b/pcmdi_metrics/diurnal/scripts/computeStdOfDailyMeans.py @@ -33,8 +33,13 @@ def compute(params): template = populateStringConstructor(args.filename_template, args) template.variable = varbname - reverted = template.reverse(os.path.basename(fileName)) - dataname = reverted["model"] + dataname = params.args.model + if dataname is None or dataname.find("*") != -1: + # model not passed or passed as * + reverted = template.reverse(os.path.basename(fileName)) + dataname = reverted["model"] + print('Data source:', dataname) + print('Opening %s ...' % fileName) if dataname not in args.skip: try: print('Data source:', dataname) diff --git a/pcmdi_metrics/diurnal/scripts/fourierDiurnalAllGridWrapped.py b/pcmdi_metrics/diurnal/scripts/fourierDiurnalAllGrid.py similarity index 100% rename from pcmdi_metrics/diurnal/scripts/fourierDiurnalAllGridWrapped.py rename to pcmdi_metrics/diurnal/scripts/fourierDiurnalAllGrid.py diff --git a/pcmdi_metrics/diurnal/scripts/savg_fourierWrappedInOut.py b/pcmdi_metrics/diurnal/scripts/savg_fourier.py similarity index 100% rename from pcmdi_metrics/diurnal/scripts/savg_fourierWrappedInOut.py rename to pcmdi_metrics/diurnal/scripts/savg_fourier.py diff --git a/pcmdi_metrics/diurnal/scripts/std_of_dailymeansWrappedInOut.py b/pcmdi_metrics/diurnal/scripts/std_of_dailymeans.py similarity index 100% rename from pcmdi_metrics/diurnal/scripts/std_of_dailymeansWrappedInOut.py rename to pcmdi_metrics/diurnal/scripts/std_of_dailymeans.py diff --git a/pcmdi_metrics/diurnal/scripts/std_of_hourlyvaluesWrappedInOut.py b/pcmdi_metrics/diurnal/scripts/std_of_hourlyvalues.py similarity index 100% rename from pcmdi_metrics/diurnal/scripts/std_of_hourlyvaluesWrappedInOut.py rename to pcmdi_metrics/diurnal/scripts/std_of_hourlyvalues.py diff --git a/pcmdi_metrics/diurnal/scripts/std_of_meandiurnalcycWrappedInOut.py b/pcmdi_metrics/diurnal/scripts/std_of_meandiurnalcycle.py similarity index 100% rename from pcmdi_metrics/diurnal/scripts/std_of_meandiurnalcycWrappedInOut.py rename to pcmdi_metrics/diurnal/scripts/std_of_meandiurnalcycle.py diff --git a/pcmdi_metrics/version.py b/pcmdi_metrics/version.py index d3ffe949c..01d356851 100644 --- a/pcmdi_metrics/version.py +++ b/pcmdi_metrics/version.py @@ -1,3 +1,3 @@ -__version__ = 'v1.2' -__git_tag_describe__ = 'v1.2-66-g0ac0e39' -__git_sha1__ = '0ac0e391be7d3b55a185096a2ded8dbbfe65a146' +__version__ = 'v1.2.1' +__git_tag_describe__ = 'v1.2.1-8-g8c046c1' +__git_sha1__ = '8c046c15847c16090ef1b7638d39b3c577b9dc69' diff --git a/recipes/pcmdi_metrics/meta.yaml.in b/recipes/pcmdi_metrics/meta.yaml.in index ac79179ff..da9e037be 100644 --- a/recipes/pcmdi_metrics/meta.yaml.in +++ b/recipes/pcmdi_metrics/meta.yaml.in @@ -18,8 +18,8 @@ requirements: - numpy run: - python {{ python }} - - vcs >=8.0 - - vcsaddons >=8.0 + - vcs >=8.1 + - vcsaddons >=8.1 - cdms2 >=3 - genutil >=8.1 - cdutil 
>=8.1 @@ -28,6 +28,11 @@ requirements: - numpy - cia - proj4 <5 + - jupyterlab + - nb_conda + - nb_conda_kernels + - cdms2 <3.1.2.2019 + about: diff --git a/tests/test_pmp_diurnal.py b/tests/test_pmp_diurnal.py index 9981434fd..f8e15db53 100644 --- a/tests/test_pmp_diurnal.py +++ b/tests/test_pmp_diurnal.py @@ -39,18 +39,18 @@ def compare_nc(self,test_name): print("Checking variable {} is correct".format(v)) test = test_out(v) good = good_out(v) - self.assertSame(test,good) + self.assertSame(test, good) - def teestDiurnaliComputeStdDailyMean(self): + def testDiurnaliComputeStdOfDailyMean(self): data_pth = cdat_info.get_sampledata_path() - cmd = '{}computeStdDailyMeansWrapped.py --num_workers=1 --mp {} --rd test_data/results/nc -t "sample_data_pr_%(model).nc" -m7'.format(self.runner, data_pth) + cmd = '{}computeStdOfDailyMeans.py --num_workers=1 --mp {} --rd test_data/results/nc -t "sample_data_pr_%(model).nc" -m7'.format(self.runner, data_pth) p = subprocess.Popen(shlex.split(cmd)) p.communicate() self.compare_nc("results/nc/pr_CMCC_Jul_1999-2005_std_of_dailymeans.nc") - def teestFourierDiurnalAllGridWrapped(self): - cmd = '{}fourierDiurnalAllGridWrapped.py --num_workers=1 --mp tests/diurnal/results/nc --rd test_data/results/nc -m7'.format(self.runner) + def testFourierDiurnalAllGrid(self): + cmd = '{}fourierDiurnalAllGrid.py --num_workers=1 --mp tests/diurnal/results/nc --rd test_data/results/nc -m7'.format(self.runner) p = subprocess.Popen(shlex.split(cmd)) p.communicate() self.compare_nc("results/nc/pr_CMCC_Jul_1999-2005_tmean.nc") @@ -58,7 +58,7 @@ def teestFourierDiurnalAllGridWrapped(self): self.compare_nc("results/nc/pr_CMCC_Jul_1999-2005_S.nc") def testDiurnalStdDailyVariance(self): - self.runJsoner("std_of_dailymeansWrappedInOut.py","pr_Jul_1999_2005_std_of_dailymeans.json","std_of_dailymeans") + self.runJsoner("std_of_dailymeans.py","pr_Jul_1999_2005_std_of_dailymeans.json","std_of_dailymeans") def runJsoner(self,script,json_file,ext): cmd = '{}{} --num_workers=1 --region_name=TROPICS --lat1=-30. --lat2=30. --lon1=0.
--lon2=360 --mp tests/diurnal/results/nc --rd test_data/results/jsons -m7 -t "pr_%(model)_%(month)_%(firstyear)-%(lastyear)_{}.nc"'.format(self.runner, script, ext) @@ -77,26 +77,25 @@ def runJsoner(self,script,json_file,ext): good = json.load(good) self.assertEqual(test["RESULTS"],good["RESULTS"]) """ - def teestCompositeDiurnalStatisticsWrapped(self): + def testCompositeDiurnalStatistics(self): data_pth = cdat_info.get_sampledata_path() - cmd = '{}compositeDiurnalStatisticsWrapped.py --num_workers=1 --mp {} --rd test_data/results/nc -t "sample_data_pr_%(model).nc" -m7'.format(self.runner, data_pth) - print("CCOOOMOMMOMONFDFDDFFDSASFGFDSAFGFFGSDFFGFSG", cmd) + cmd = '{}compositeDiurnalStatistics.py --num_workers=1 --mp {} --rd test_data/results/nc -t "sample_data_pr_%(model).nc" -m7'.format(self.runner, data_pth) p = subprocess.Popen(shlex.split(cmd)) p.communicate() self.compare_nc("results/nc/pr_CMCC_Jul_1999-2005_diurnal_avg.nc") self.compare_nc("results/nc/pr_CMCC_Jul_1999-2005_diurnal_std.nc") self.compare_nc("results/nc/pr_CMCC_LocalSolarTimes.nc") - def teestStd_of_hourlyvaluesWrappedInOut(self): - self.runJsoner("std_of_hourlyvaluesWrappedInOut.py","pr_Jul_1999-2005_std_of_hourlymeans.json","diurnal_std") + def testStd_of_hourlyvalues(self): + self.runJsoner("std_of_hourlyvalues.py","pr_Jul_1999-2005_std_of_hourlymeans.json","diurnal_std") - def teestStd_of_meandiurnalcycWrappedInOut(self): - self.runJsoner("std_of_meandiurnalcycWrappedInOut.py","pr_Jul_1999-2005_std_of_meandiurnalcyc.json","diurnal_avg") + def testStd_of_meandiurnalcycle(self): + self.runJsoner("std_of_meandiurnalcycle.py","pr_Jul_1999-2005_std_of_meandiurnalcyc.json","diurnal_avg") - def teestSavg_fourierWrappedInOut(self): - self.runJsoner("savg_fourierWrappedInOut.py","pr_Jul_1999-2005_savg_DiurnalFourier.json","S") + def testSavg_fourier(self): + self.runJsoner("savg_fourier.py","pr_Jul_1999-2005_savg_DiurnalFourier.json","S") - def teestfourierDiurnalGridpoints(self): + def testfourierDiurnalGridpoints(self): cmd = '{}fourierDiurnalGridpoints.py --num_workers=1 --mp tests/diurnal/results/nc --rd test_data/results/ascii'.format(self.runner) p = subprocess.Popen(shlex.split(cmd)) p.communicate()
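The demo notebooks in this patch all finish by opening a driver's JSON output and printing it raw. Below is a minimal post-processing sketch using only the Python standard library; it follows the `RESULTS -> model -> monsoon domain -> statistic` layout visible in the Monsoon Wang output printed in Demo_2. The `demo_output` location and the `monsoon_wang.json` filename are assumptions, not part of this patch — point `json_path` at whichever file `mpindex_compute.py` actually wrote into your `results_dir`.

```python
# Sketch: tabulate Monsoon Wang statistics from a PMP JSON output file.
import json
import os

demo_output_directory = "demo_output"  # assumed; should match user_choices.py
# Assumed filename -- use whatever mpindex_compute.py wrote under results_dir.
json_path = os.path.join(demo_output_directory, "monsoon_wang", "monsoon_wang.json")

with open(json_path) as f:
    metrics = json.load(f)

# "RESULTS" is keyed by model, then by monsoon domain (AllMW, NAMM, ...),
# then by statistic name; the JSON stores the values as strings.
for model, domains in metrics["RESULTS"].items():
    print(model)
    for domain in sorted(domains):
        stats = domains[domain]
        print("  {:6s}  cor={:6.3f}  rmsn={:6.3f}".format(
            domain, float(stats["cor"]), float(stats["rmsn"])))
```

The same pattern applies to the mean-climate and diurnal JSON files; only the nesting under `RESULTS` differs, so inspect one file with `json.load` before hard-coding keys.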