Skip to content

climate_ref_esmvaltool.diagnostics #

ESMValTool diagnostics.

ClimateAtGlobalWarmingLevels #

Bases: ESMValToolDiagnostic

Calculate climate variables at global warming levels.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/climate_at_global_warming_levels.py
class ClimateAtGlobalWarmingLevels(ESMValToolDiagnostic):
    """
    Calculate climate variables at global warming levels.
    """

    name = "Climate variables at global warming levels"
    slug = "climate-at-global-warming-levels"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "recipe_calculate_gwl_exceedance_stats.yml"

    # CMIP6 variables processed by this diagnostic.
    variables = (
        "pr",
        "tas",
    )

    # Facets that must agree between a scenario dataset and the
    # supplementary datasets added for it (e.g. the historical run).
    matching_facets = (
        "source_id",
        "member_id",
        "grid_label",
        "table_id",
        "variable_id",
    )

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": (
                            "ssp126",
                            "ssp245",
                            "ssp370",
                            "ssp585",
                        ),
                        "table_id": "Amon",
                    },
                ),
            ),
            # One execution per scenario experiment.
            group_by=("experiment_id",),
            constraints=(
                # Pair every scenario dataset with its historical counterpart.
                AddSupplementaryDataset(
                    supplementary_facets={"experiment_id": "historical"},
                    matching_facets=matching_facets,
                    optional_matching_facets=tuple(),
                ),
                # The combined historical + scenario data must span 1850-2100.
                RequireTimerange(
                    group_by=matching_facets,
                    start=PartialDateTime(year=1850, month=1),
                    end=PartialDateTime(year=2100, month=12),
                ),
                # Drop datasets for which no historical run was found.
                RequireFacets(
                    "experiment_id",
                    required_facets=("historical",),
                    group_by=matching_facets,
                ),
                # Both variables must be present for each model/member/grid
                # within an experiment.
                RequireFacets(
                    "variable_id",
                    required_facets=variables,
                    group_by=("experiment_id", "source_id", "member_id", "grid_label", "table_id"),
                ),
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )
    facets = ()

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Replaces the datasets of the base recipe with entries derived from
        the selected input files and pins the timerange used by each
        diagnostic in the recipe.
        """
        # Set up the datasets
        diagnostics = recipe["diagnostics"]
        for diagnostic in diagnostics.values():
            # Per-diagnostic dataset lists from the base recipe are replaced
            # by the single top-level "datasets" section assigned below.
            diagnostic.pop("additional_datasets")
        recipe_variables = dataframe_to_recipe(
            input_files[SourceDatasetType.CMIP6],
            group_by=(
                "source_id",
                "member_id",
                "grid_label",
                "table_id",
                "variable_id",
            ),
        )
        # Use the "tas" entries as the dataset list; variables are requested
        # per diagnostic below.
        datasets = recipe_variables["tas"]["additional_datasets"]
        datasets = [ds for ds in datasets if ds["exp"] != "historical"]
        for dataset in datasets:
            # Timeranges are set explicitly per diagnostic instead.
            dataset.pop("timerange")
        recipe["datasets"] = datasets

        # Specify the timeranges
        diagnostics["calculate_gwl_exceedance_years"]["variables"]["tas_anomaly"] = {
            "short_name": "tas",
            "preprocessor": "calculate_anomalies",
            "timerange": "1850/2100",
        }

        diagnostics["gwl_mean_plots_tas"]["variables"]["tas"] = {
            "short_name": "tas",
            "preprocessor": "multi_model_gwl_stats",
            "timerange": "2000/2100",
        }

        diagnostics["gwl_mean_plots_pr"]["variables"]["pr"] = {
            "short_name": "pr",
            "preprocessor": "multi_model_gwl_stats",
            "timerange": "2000/2100",
        }

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/climate_at_global_warming_levels.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Installs the selected CMIP6 scenario datasets at the top level of the
    recipe and pins the timerange of each diagnostic.
    """
    # The base recipe carries per-diagnostic dataset lists; drop them in
    # favour of the single top-level "datasets" section assigned below.
    for diag in recipe["diagnostics"].values():
        diag.pop("additional_datasets")

    grouping = (
        "source_id",
        "member_id",
        "grid_label",
        "table_id",
        "variable_id",
    )
    recipe_variables = dataframe_to_recipe(
        input_files[SourceDatasetType.CMIP6],
        group_by=grouping,
    )

    # Keep only scenario (non-historical) "tas" entries; the timerange is
    # set explicitly per diagnostic instead of per dataset.
    scenario_datasets = []
    for entry in recipe_variables["tas"]["additional_datasets"]:
        if entry["exp"] == "historical":
            continue
        entry.pop("timerange")
        scenario_datasets.append(entry)
    recipe["datasets"] = scenario_datasets

    # Anomalies are computed over the full historical + scenario period.
    diags = recipe["diagnostics"]
    diags["calculate_gwl_exceedance_years"]["variables"]["tas_anomaly"] = {
        "short_name": "tas",
        "preprocessor": "calculate_anomalies",
        "timerange": "1850/2100",
    }

    # The two plotting diagnostics share the same preprocessor and period.
    for short_name in ("tas", "pr"):
        diags[f"gwl_mean_plots_{short_name}"]["variables"][short_name] = {
            "short_name": short_name,
            "preprocessor": "multi_model_gwl_stats",
            "timerange": "2000/2100",
        }

ClimateDriversForFire #

Bases: ESMValToolDiagnostic

Calculate diagnostics regarding climate drivers for fire.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/climate_drivers_for_fire.py
class ClimateDriversForFire(ESMValToolDiagnostic):
    """
    Calculate diagnostics regarding climate drivers for fire.
    """

    name = "Climate drivers for fire"
    slug = "climate-drivers-for-fire"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "ref/recipe_ref_fire.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                # Atmospheric monthly-mean variables.
                FacetFilter(
                    {
                        "variable_id": ("hurs", "pr", "tas", "tasmax"),
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    }
                ),
                # Land monthly-mean variables.
                FacetFilter(
                    {
                        "variable_id": ("cVeg", "treeFrac"),
                        "experiment_id": "historical",
                        "table_id": "Lmon",
                    }
                ),
                # Vegetation fraction comes from the Emon table.
                FacetFilter(
                    {
                        "variable_id": "vegFrac",
                        "experiment_id": "historical",
                        "table_id": "Emon",
                    }
                ),
            ),
            # One execution per model/ensemble member/grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # Every dataset must cover 2013-2014, the analysis period
                # fixed in update_recipe below.
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(2013, 1),
                    end=PartialDateTime(2014, 12),
                ),
                AddSupplementaryDataset.from_defaults("sftlf", SourceDatasetType.CMIP6),
                # All variables (including the land-fraction supplementary)
                # must be present for a group to be executed.
                RequireFacets(
                    "variable_id",
                    (
                        "cVeg",
                        "hurs",
                        "pr",
                        "tas",
                        "tasmax",
                        "sftlf",
                        "treeFrac",
                        "vegFrac",
                    ),
                ),
            ),
        ),
    )
    facets = ()

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Inserts a single dataset entry (derived from the cVeg files) and
        fixes the analysis period to 2013-2014.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        # The cVeg entry is used as the representative dataset; "mip" is
        # removed because it differs per variable (presumably supplied by
        # the recipe itself — confirm against the base recipe), and the
        # timerange is replaced by explicit start/end years.
        dataset = recipe_variables["cVeg"]["additional_datasets"][0]
        dataset.pop("mip")
        dataset.pop("timerange")
        dataset["start_year"] = 2013
        dataset["end_year"] = 2014
        recipe["datasets"] = [dataset]
        recipe["diagnostics"]["fire_evaluation"]["scripts"]["fire_evaluation"]["remove_confire_files"] = True

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/climate_drivers_for_fire.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Installs a single dataset entry built from the cVeg input files and
    pins the analysis period to 2013-2014.
    """
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    entry = recipe_variables["cVeg"]["additional_datasets"][0]
    # The mip and timerange are managed by the recipe; replace the
    # timerange with explicit start/end years.
    for key in ("mip", "timerange"):
        entry.pop(key)
    entry["start_year"] = 2013
    entry["end_year"] = 2014
    recipe["datasets"] = [entry]
    fire_script = recipe["diagnostics"]["fire_evaluation"]["scripts"]["fire_evaluation"]
    fire_script["remove_confire_files"] = True

CloudRadiativeEffects #

Bases: ESMValToolDiagnostic

Plot climatologies and zonal mean profiles of cloud radiative effects (sw + lw) for a dataset.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_radiative_effects.py
class CloudRadiativeEffects(ESMValToolDiagnostic):
    """
    Plot climatologies and zonal mean profiles of cloud radiative effects (sw + lw) for a dataset.
    """

    name = "Climatologies and zonal mean profiles of cloud radiative effects"
    slug = "cloud-radiative-effects"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "ref/recipe_ref_cre.yml"

    # TOA radiation variables used to derive the cloud radiative effects.
    variables = (
        "rlut",
        "rlutcs",
        "rsut",
        "rsutcs",
    )
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    }
                ),
            ),
            # One execution per model/ensemble member/grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # Every dataset must cover the 1996-2014 evaluation period.
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1996, 1),
                    end=PartialDateTime(2014, 12),
                ),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                # All four radiation variables must be present.
                RequireFacets("variable_id", variables),
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
        # TODO: Use CERES-EBAF, ESACCI-CLOUD, and ISCCP-FH from obs4MIPs once available.
    )

    facets = ()
    # Zonal-mean profiles from the profile plots: index 0 along "dim0"
    # holds the model, indices 1..3 the reference observations.
    series = tuple(
        SeriesDefinition(
            file_pattern=f"plot_profiles/plot/variable_vs_lat_{var_name}_*.nc",
            sel={"dim0": 0},  # Select the model.
            dimensions={"variable_id": var_name, "statistic": "zonal mean"},
            values_name=var_name,
            index_name="lat",
            attributes=[],
        )
        for var_name in ["lwcre", "swcre"]
    ) + tuple(
        SeriesDefinition(
            file_pattern=f"plot_profiles/plot/variable_vs_lat_{var_name}_*.nc",
            sel={"dim0": i},  # Select the observation.
            dimensions={"variable_id": var_name, "statistic": "zonal mean", "reference_source_id": source_id},
            values_name=var_name,
            index_name="lat",
            attributes=[],
        )
        for var_name in ["lwcre", "swcre"]
        for i, source_id in enumerate(
            ["CERES-EBAF-Ed4.2", "ESACCI-CLOUD-AVHRR-AMPM-fv3.0", "ISCCP-FH"], start=1
        )
    )

    @staticmethod
    def update_recipe(recipe: Recipe, input_files: dict[SourceDatasetType, pandas.DataFrame]) -> None:
        """Update the recipe.

        Installs the selected model datasets at the top level of the recipe.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        # The cell-area supplementary is not a plottable variable.
        recipe_variables = {k: v for k, v in recipe_variables.items() if k != "areacella"}

        # One dataset entry per model; "rsut" is used as the representative
        # variable since all four are required to be present.
        datasets = recipe_variables["rsut"]["additional_datasets"]
        for dataset in datasets:
            dataset.pop("timerange")
        recipe["datasets"] = datasets

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_radiative_effects.py
@staticmethod
def update_recipe(recipe: Recipe, input_files: dict[SourceDatasetType, pandas.DataFrame]) -> None:
    """Update the recipe.

    Installs one dataset entry per selected model at the top level of the
    recipe, using the rsut files as the representative variable.
    """
    variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    # The cell-area supplementary is not a plottable variable.
    variables = {name: entry for name, entry in variables.items() if name != "areacella"}

    model_datasets = variables["rsut"]["additional_datasets"]
    for entry in model_datasets:
        # The timerange is managed by the recipe.
        del entry["timerange"]
    recipe["datasets"] = model_datasets

CloudScatterplotCliTa #

Bases: ESMValToolDiagnostic

Scatterplot of cli vs ta.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
class CloudScatterplotCliTa(ESMValToolDiagnostic):
    """Scatterplot of cli vs ta."""

    name = "Scatterplots of two cloud-relevant variables (cli vs ta)"
    slug = "cloud-scatterplots-cli-ta"
    base_recipe = "ref/recipe_ref_scatterplot.yml"

    # CMIP6 input requirements for the plotted variable pair.
    data_requirements = get_cmip6_data_requirements(("cli", "ta"))
    # Shared recipe updater bound to this variable pair.
    update_recipe = partial(update_recipe, var_x="cli", var_y="ta")

    facets = ()

CloudScatterplotCliviLwcre #

Bases: ESMValToolDiagnostic

Scatterplot of clivi vs lwcre.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
class CloudScatterplotCliviLwcre(ESMValToolDiagnostic):
    """Scatterplot of clivi vs lwcre."""

    name = "Scatterplots of two cloud-relevant variables (clivi vs lwcre)"
    slug = "cloud-scatterplots-clivi-lwcre"
    base_recipe = "ref/recipe_ref_scatterplot.yml"

    # CMIP6 inputs: lwcre is presumably derived from rlut/rlutcs.
    data_requirements = get_cmip6_data_requirements(("clivi", "rlut", "rlutcs"))
    # Shared recipe updater bound to this variable pair.
    update_recipe = partial(update_recipe, var_x="clivi", var_y="lwcre")

    facets = ()

CloudScatterplotCltSwcre #

Bases: ESMValToolDiagnostic

Scatterplot of clt vs swcre.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
class CloudScatterplotCltSwcre(ESMValToolDiagnostic):
    """Scatterplot of clt vs swcre."""

    name = "Scatterplots of two cloud-relevant variables (clt vs swcre)"
    slug = "cloud-scatterplots-clt-swcre"
    base_recipe = "ref/recipe_ref_scatterplot.yml"

    # CMIP6 inputs: swcre is presumably derived from rsut/rsutcs.
    data_requirements = get_cmip6_data_requirements(("clt", "rsut", "rsutcs"))
    # Shared recipe updater bound to this variable pair.
    update_recipe = partial(update_recipe, var_x="clt", var_y="swcre")

    facets = ()

CloudScatterplotClwviPr #

Bases: ESMValToolDiagnostic

Scatterplot of clwvi vs pr.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
class CloudScatterplotClwviPr(ESMValToolDiagnostic):
    """Scatterplot of clwvi vs pr."""

    name = "Scatterplots of two cloud-relevant variables (clwvi vs pr)"
    slug = "cloud-scatterplots-clwvi-pr"
    base_recipe = "ref/recipe_ref_scatterplot.yml"

    # CMIP6 input requirements for the plotted variable pair.
    data_requirements = get_cmip6_data_requirements(("clwvi", "pr"))
    # Shared recipe updater bound to this variable pair.
    update_recipe = partial(update_recipe, var_x="clwvi", var_y="pr")

    facets = ()

CloudScatterplotsReference #

Bases: ESMValToolDiagnostic

Reference scatterplots of two cloud-relevant variables.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
class CloudScatterplotsReference(ESMValToolDiagnostic):
    """
    Reference scatterplots of two cloud-relevant variables.
    """

    name = "Reference scatterplots of two cloud-relevant variables"
    slug = "cloud-scatterplots-reference"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "ref/recipe_ref_scatterplot.yml"
    facets = ()
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.obs4MIPs,
            filters=(
                # Only ERA-5 air temperature is currently requested.
                FacetFilter(
                    facets={
                        "source_id": ("ERA-5",),
                        "variable_id": ("ta",),
                    },
                ),
            ),
            group_by=("instance_id",),
            constraints=(
                # The observations must cover 2007-2014.
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(2007, 1),
                    end=PartialDateTime(2014, 12),
                ),
            ),
            # TODO: Add obs4MIPs datasets once available and working:
            #
            # obs4MIPs datasets with issues:
            # - GPCP-V2.3: pr
            # - CERES-EBAF-4-2: rlut, rlutcs, rsut, rsutcs
            #
            # Unsure if available on obs4MIPs:
            # - AVHRR-AMPM-fv3.0: clivi, clwvi
            # - ESACCI-CLOUD: clt
            # - CALIPSO-ICECLOUD: cli
            #
            # Related issues:
            # - https://github.com/Climate-REF/climate-ref/issues/260
            # - https://github.com/ESMValGroup/ESMValCore/issues/2712
            # - https://github.com/ESMValGroup/ESMValCore/issues/2711
            # - https://github.com/SciTools/iris/issues/6411
        ),
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Keeps only the reference (observation-only) diagnostics and wires
        the ERA-5 and GPCP observation datasets into them.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.obs4MIPs])
        # Only the "*_ref" diagnostics plot observations against each other.
        recipe["diagnostics"] = {k: v for k, v in recipe["diagnostics"].items() if k.endswith("_ref")}

        era5_dataset = recipe_variables["ta"]["additional_datasets"][0]
        era5_dataset["timerange"] = "2007/2015"  # Use the same timerange as for the other variable.
        era5_dataset["alias"] = era5_dataset["dataset"]
        diagnostic = recipe["diagnostics"]["plot_joint_cli_ta_ref"]
        diagnostic["variables"]["ta"]["additional_datasets"] = [era5_dataset]
        suptitle = "CALIPSO-ICECLOUD / {dataset} {timerange}".format(**era5_dataset)
        diagnostic["scripts"]["plot"]["suptitle"] = suptitle
        # Derive a filesystem-safe plot name from the title.
        diagnostic["scripts"]["plot"]["plot_filename"] = (
            f"jointplot_cli_ta_{suptitle.replace(' ', '_').replace('/', '-')}"
        )

        # Use the correct obs4MIPs dataset name for dataset that cannot be ingested
        # https://github.com/Climate-REF/climate-ref/issues/260.
        diagnostic = recipe["diagnostics"]["plot_joint_clwvi_pr_ref"]
        diagnostic["variables"]["pr"]["additional_datasets"] = [
            {
                "dataset": "GPCP-V2.3",
                "project": "obs4MIPs",
                "alias": "GPCP-SG",
                "timerange": "1992/2016",
            }
        ]

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/cloud_scatterplots.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Keeps only the reference (observation-only) diagnostics and wires the
    ERA-5 and GPCP observation datasets into them.
    """
    obs_variables = dataframe_to_recipe(input_files[SourceDatasetType.obs4MIPs])
    # Only the "*_ref" diagnostics plot observations against each other.
    reference_diagnostics = {
        diag_name: diag
        for diag_name, diag in recipe["diagnostics"].items()
        if diag_name.endswith("_ref")
    }
    recipe["diagnostics"] = reference_diagnostics

    era5 = obs_variables["ta"]["additional_datasets"][0]
    era5["timerange"] = "2007/2015"  # Use the same timerange as for the other variable.
    era5["alias"] = era5["dataset"]
    cli_ta = recipe["diagnostics"]["plot_joint_cli_ta_ref"]
    cli_ta["variables"]["ta"]["additional_datasets"] = [era5]
    suptitle = f"CALIPSO-ICECLOUD / {era5['dataset']} {era5['timerange']}"
    cli_ta["scripts"]["plot"]["suptitle"] = suptitle
    # Derive a filesystem-safe plot name from the title.
    safe_title = suptitle.replace(" ", "_").replace("/", "-")
    cli_ta["scripts"]["plot"]["plot_filename"] = f"jointplot_cli_ta_{safe_title}"

    # Use the correct obs4MIPs dataset name for dataset that cannot be ingested
    # https://github.com/Climate-REF/climate-ref/issues/260.
    clwvi_pr = recipe["diagnostics"]["plot_joint_clwvi_pr_ref"]
    clwvi_pr["variables"]["pr"]["additional_datasets"] = [
        {
            "dataset": "GPCP-V2.3",
            "project": "obs4MIPs",
            "alias": "GPCP-SG",
            "timerange": "1992/2016",
        }
    ]

ENSOBasicClimatology #

Bases: ESMValToolDiagnostic

Calculate the ENSO CLIVAR metrics - background climatology.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/enso.py
class ENSOBasicClimatology(ESMValToolDiagnostic):
    """
    Calculate the ENSO CLIVAR metrics - background climatology.
    """

    name = "ENSO Basic Climatology"
    slug = "enso-basic-climatology"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "ref/recipe_enso_basicclimatology.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                # Atmospheric monthly means.
                FacetFilter(
                    facets={
                        "variable_id": ("pr", "tauu"),
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    },
                ),
                # Ocean monthly means (sea-surface temperature).
                FacetFilter(
                    facets={
                        "variable_id": "tos",
                        "experiment_id": "historical",
                        "table_id": "Omon",
                    },
                ),
            ),
            # One execution per model/ensemble member/grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                # All three variables must be present for a group.
                RequireFacets(
                    "variable_id",
                    (
                        "pr",
                        "tauu",
                        "tos",
                    ),
                ),
            ),
        ),
    )
    facets = ()

    # Series extracted from the diagnostic's NetCDF output. The literal
    # "{source_id}" entries are template placeholders (presumably filled in
    # with the model's source_id downstream); the other entries name the
    # reference observations and are recorded via "reference_source_id".
    series = (
        # Time-mean zonal bias profiles across the equatorial Pacific.
        tuple(
            SeriesDefinition(
                file_pattern=f"diagnostic_metrics/plot_script/{source_id}_eq_{var_name}_bias.nc",
                dimensions=(
                    {
                        "statistic": (
                            f"zonal bias in the time-mean {var_name} structure across the equatorial Pacific"
                        ),
                    }
                    | ({} if source_id == "{source_id}" else {"reference_source_id": source_id})
                ),
                values_name="tos" if var_name == "sst" else var_name,
                index_name="lon",
                attributes=[],
            )
            for var_name in ("pr", "sst", "tauu")
            for source_id in ("{source_id}", "GPCP-V2.3", "TROPFLUX")
        )
        # Seasonal-cycle amplitude profiles. NOTE(review): the escaped
        # {{source_id}} keeps the literal placeholder in the file name for
        # every reference as well, unlike the bias block above which
        # interpolates the reference name — confirm this is intended.
        + tuple(
            SeriesDefinition(
                file_pattern=f"diagnostic_metrics/plot_script/{{source_id}}_eq_{var_name}_seacycle.nc",
                dimensions=(
                    {
                        "statistic": (
                            "zonal bias in the amplitude of the mean seasonal cycle of "
                            f"{var_name} in the equatorial Pacific"
                        ),
                    }
                    | ({} if source_id == "{source_id}" else {"reference_source_id": source_id})
                ),
                values_name="tos" if var_name == "sst" else var_name,
                index_name="lon",
                attributes=[],
            )
            for var_name in ("pr", "sst", "tauu")
            for source_id in ("{source_id}", "GPCP-V2.3", "TROPFLUX")
        )
        # Meridional precipitation ("double ITCZ") bias profiles.
        + tuple(
            SeriesDefinition(
                file_pattern="diagnostic_metrics/plot_script/{source_id}_pr_double.nc",
                dimensions=(
                    {
                        "statistic": (
                            "meridional bias in the time-mean pr structure across the eastern Pacific"
                        ),
                    }
                    | ({} if source_id == "{source_id}" else {"reference_source_id": source_id})
                ),
                values_name="pr",
                index_name="lat",
                attributes=[],
            )
            for source_id in ("{source_id}", "GPCP-V2.3")
        )
        # Meridional seasonal-cycle bias profiles. NOTE(review): this block
        # uses a "*" wildcard rather than the {source_id} placeholder used
        # by the sibling block above — confirm this is intended.
        + tuple(
            SeriesDefinition(
                file_pattern="diagnostic_metrics/plot_script/*_pr_double_seacycle.nc",
                dimensions=(
                    {
                        "statistic": (
                            "meridional bias in the amplitude of the mean seasonal "
                            "pr cycle in the eastern Pacific"
                        ),
                    }
                    | ({} if source_id == "{source_id}" else {"reference_source_id": source_id})
                ),
                values_name="pr",
                index_name="lat",
                attributes=[],
            )
            for source_id in ("{source_id}", "GPCP-V2.3")
        )
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Removes the recipe-wide dataset list and attaches the selected
        datasets to each variable of each diagnostic instead.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        recipe.pop("datasets")
        for diagnostic in recipe["diagnostics"].values():
            for variable in diagnostic["variables"].values():
                variable["additional_datasets"].extend(
                    recipe_variables[variable["short_name"]]["additional_datasets"]
                )

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/enso.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Removes the recipe-wide dataset list and attaches the selected
    datasets to each variable of each diagnostic instead.
    """
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    # Datasets are declared per variable rather than recipe-wide.
    del recipe["datasets"]
    for diag in recipe["diagnostics"].values():
        for variable in diag["variables"].values():
            extra = recipe_variables[variable["short_name"]]["additional_datasets"]
            variable["additional_datasets"].extend(extra)

ENSOCharacteristics #

Bases: ESMValToolDiagnostic

Calculate the ENSO CLIVAR metrics - basic ENSO characteristics.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/enso.py
class ENSOCharacteristics(ESMValToolDiagnostic):
    """
    Calculate the ENSO CLIVAR metrics - basic ENSO characteristics.
    """

    name = "ENSO Characteristics"
    slug = "enso-characteristics"
    # Upstream ESMValTool recipe used as the template for this diagnostic.
    base_recipe = "ref/recipe_enso_characteristics.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                # Sea-surface temperature from the ocean monthly-mean table.
                FacetFilter(
                    facets={
                        "variable_id": "tos",
                        "experiment_id": "historical",
                        "table_id": "Omon",
                    },
                ),
            ),
            # One execution per model/ensemble member/grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                # The ocean cell-area supplementary is required for the
                # area-weighted statistics.
                AddSupplementaryDataset.from_defaults("areacello", SourceDatasetType.CMIP6),
                RequireFacets("variable_id", ("tos", "areacello")),
            ),
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "region", "metric")
    # ENSO pattern and lifecycle are series, but the ESMValTool diagnostic
    # script does not save the values used in the figure.
    series = tuple()

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Installs the selected tos datasets and appends a single reference
        observation dataset.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        recipe["datasets"] = recipe_variables["tos"]["additional_datasets"]
        # TODO: update the observational data requirement once available on ESGF.
        # Observations - use only one per run
        # (NOAA-ERSSTv5, version v5, is a possible alternative reference.)
        recipe["datasets"].append(
            {
                "dataset": "TROPFLUX",
                "version": "v1",
                "project": "OBS6",
                "type": "reanaly",
                "tier": 2,
            }
        )

    @staticmethod
    def format_result(
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Reads the metric matrix written by the ESMValTool diagnostic script
        and fills the CMEC metric bundle with one value per metric under a
        single "global" region.
        """
        # The CSV has no header; each row is (dataset, metric_name, metric_value).
        metrics = pd.read_csv(
            result_dir / "work" / "diagnostic_metrics" / "plot_script" / "matrix.csv",
            names=["dataset", "metric_name", "metric_value"],
        )

        # Update the diagnostic bundle arguments with the computed diagnostics.
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": [
                "region",
                "metric",
            ],
            "region": {"global": {}},
            "metric": {metric: {} for metric in metrics.metric_name},
        }
        # dict(zip(...)) replaces the redundant dict comprehension over zip.
        metric_args[MetricCV.RESULTS.value] = {
            "global": dict(zip(metrics.metric_name, metrics.metric_value)),
        }

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) staticmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/enso.py
@staticmethod
def format_result(
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Reads the metric matrix written by the ESMValTool diagnostic script and
    fills the CMEC metric bundle with one value per metric under a single
    "global" region.
    """
    # The CSV has no header; each row is (dataset, metric_name, metric_value).
    metrics = pd.read_csv(
        result_dir / "work" / "diagnostic_metrics" / "plot_script" / "matrix.csv",
        names=["dataset", "metric_name", "metric_value"],
    )

    # Update the diagnostic bundle arguments with the computed diagnostics.
    metric_args[MetricCV.DIMENSIONS.value] = {
        "json_structure": [
            "region",
            "metric",
        ],
        "region": {"global": {}},
        "metric": {metric: {} for metric in metrics.metric_name},
    }
    # dict(zip(...)) replaces the redundant dict comprehension over zip.
    metric_args[MetricCV.RESULTS.value] = {
        "global": dict(zip(metrics.metric_name, metrics.metric_value)),
    }

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/enso.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Installs the selected tos datasets and appends a single reference
    observation dataset.
    """
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    datasets = recipe_variables["tos"]["additional_datasets"]
    # TODO: update the observational data requirement once available on ESGF.
    # Observations - use only one per run
    # (NOAA-ERSSTv5, version v5, is a possible alternative reference.)
    datasets.append(
        {
            "dataset": "TROPFLUX",
            "version": "v1",
            "project": "OBS6",
            "type": "reanaly",
            "tier": 2,
        }
    )
    recipe["datasets"] = datasets

EquilibriumClimateSensitivity #

Bases: ESMValToolDiagnostic

Calculate the global mean equilibrium climate sensitivity for a dataset.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/ecs.py
class EquilibriumClimateSensitivity(ESMValToolDiagnostic):
    """
    Calculate the global mean equilibrium climate sensitivity for a dataset.
    """

    name = "Equilibrium Climate Sensitivity"
    slug = "equilibrium-climate-sensitivity"
    # Base ESMValTool recipe; update_recipe() below trims it to a single model.
    base_recipe = "recipe_ecs.yml"

    # Monthly variables needed: tas plus the radiation fluxes from which
    # rtnt (net TOA radiation) is derived.
    variables = (
        "rlut",
        "rsdt",
        "rsut",
        "tas",
    )
    # ECS is estimated from the contrast between these two experiments.
    experiments = (
        "abrupt-4xCO2",
        "piControl",
    )
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": experiments,
                        "table_id": "Amon",
                    },
                ),
            ),
            # One execution per model ensemble member and grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                # Every variable must be present for each experiment ...
                RequireFacets(
                    "variable_id",
                    required_facets=variables,
                    group_by=("source_id", "member_id", "grid_label", "experiment_id"),
                ),
                # ... and each variable must be present for both experiments.
                RequireFacets(
                    "experiment_id",
                    required_facets=experiments,
                    group_by=("source_id", "member_id", "grid_label", "variable_id"),
                ),
                # Cell areas are needed for area-weighted global means.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "region", "metric")
    # Expose the regression underlying the ECS estimate as a series.
    series = (
        SeriesDefinition(
            file_pattern="ecs/calculate/ecs_regression_*.nc",
            dimensions={
                "statistic": ("global annual mean anomaly of rtnt vs tas"),
            },
            values_name="rtnt_anomaly",
            index_name="tas_anomaly",
            attributes=[],
        ),
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe."""
        # Only run the diagnostic that computes ECS for a single model.
        recipe["diagnostics"] = {
            "ecs": {
                "description": "Calculate ECS.",
                "variables": {
                    "tas": {
                        "preprocessor": "spatial_mean",
                    },
                    "rtnt": {
                        "preprocessor": "spatial_mean",
                        "derive": True,
                    },
                },
                "scripts": {
                    "calculate": {
                        "script": "climate_metrics/ecs.py",
                        "calculate_mmm": False,
                    },
                },
            },
        }

        # Prepare updated datasets section in recipe. It contains two
        # datasets, one for the "abrupt-4xCO2" and one for the "piControl"
        # experiment.
        recipe_variables = dataframe_to_recipe(
            input_files[SourceDatasetType.CMIP6],
            equalize_timerange=True,
        )
        recipe["datasets"] = recipe_variables["tas"]["additional_datasets"]

        # Remove keys from the recipe that are only used for YAML anchors
        keys_to_remove = [
            "CMIP5_RTMT",
            "CMIP6_RTMT",
            "CMIP5_RTNT",
            "CMIP6_RTNT",
            "ECS_SCRIPT",
            "SCATTERPLOT",
        ]
        for key in keys_to_remove:
            recipe.pop(key, None)

    @staticmethod
    def format_result(
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result."""
        # Each output file holds a single scalar; fill values become NaN.
        ecs_ds = xarray.open_dataset(result_dir / "work" / "ecs" / "calculate" / "ecs.nc")
        ecs = float(fillvalues_to_nan(ecs_ds["ecs"].values)[0])
        lambda_ds = xarray.open_dataset(result_dir / "work" / "ecs" / "calculate" / "lambda.nc")
        lambda_ = float(fillvalues_to_nan(lambda_ds["lambda"].values)[0])

        # Update the diagnostic bundle arguments with the computed diagnostics.
        metric_args[MetricCV.DIMENSIONS.value] = {
            MetricCV.JSON_STRUCTURE.value: [
                "region",
                "metric",
            ],
            "region": {"global": {}},
            "metric": {"ecs": {}, "lambda": {}},
        }
        metric_args[MetricCV.RESULTS.value] = {
            "global": {
                "ecs": ecs,
                "lambda": lambda_,
            },
        }

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) staticmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/ecs.py
@staticmethod
def format_result(
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the computed ECS and feedback parameter as CMEC bundles."""
    work_dir = result_dir / "work" / "ecs" / "calculate"

    def read_scalar(file_name: str, var_name: str) -> float:
        # Each output file holds a single value; fill values become NaN.
        dataset = xarray.open_dataset(work_dir / file_name)
        return float(fillvalues_to_nan(dataset[var_name].values)[0])

    ecs_value = read_scalar("ecs.nc", "ecs")
    feedback_value = read_scalar("lambda.nc", "lambda")

    # Record both scalars under a single "global" region.
    metric_args[MetricCV.DIMENSIONS.value] = {
        MetricCV.JSON_STRUCTURE.value: [
            "region",
            "metric",
        ],
        "region": {"global": {}},
        "metric": {"ecs": {}, "lambda": {}},
    }
    metric_args[MetricCV.RESULTS.value] = {
        "global": {
            "ecs": ecs_value,
            "lambda": feedback_value,
        },
    }

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/ecs.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Trim the ECS recipe to a single-model run on the selected datasets."""
    # Drop keys that exist only to hold YAML anchors in the base recipe.
    for anchor_key in (
        "CMIP5_RTMT",
        "CMIP6_RTMT",
        "CMIP5_RTNT",
        "CMIP6_RTNT",
        "ECS_SCRIPT",
        "SCATTERPLOT",
    ):
        recipe.pop(anchor_key, None)

    # Replace all diagnostics with a single one that computes ECS for one
    # model (no multi-model mean).
    recipe["diagnostics"] = {
        "ecs": {
            "description": "Calculate ECS.",
            "variables": {
                "tas": {"preprocessor": "spatial_mean"},
                "rtnt": {"preprocessor": "spatial_mean", "derive": True},
            },
            "scripts": {
                "calculate": {"script": "climate_metrics/ecs.py", "calculate_mmm": False},
            },
        },
    }

    # The datasets section contains two entries, one for "abrupt-4xCO2"
    # and one for "piControl", with their time ranges equalized.
    cmip6_variables = dataframe_to_recipe(
        input_files[SourceDatasetType.CMIP6],
        equalize_timerange=True,
    )
    recipe["datasets"] = cmip6_variables["tas"]["additional_datasets"]

GlobalMeanTimeseries #

Bases: ESMValToolDiagnostic

Calculate the annual mean global mean timeseries for a dataset.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/example.py
class GlobalMeanTimeseries(ESMValToolDiagnostic):
    """
    Calculate the annual mean global mean timeseries for a dataset.
    """

    name = "Global Mean Timeseries"
    slug = "global-mean-timeseries"
    # Based on the ESMValTool example recipe; update_recipe() strips the
    # parts that are not needed here.
    base_recipe = "examples/recipe_python.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(FacetFilter(facets={"variable_id": ("tas",)}),),
            # One execution per dataset.
            group_by=("source_id", "experiment_id", "member_id", "table_id", "variable_id", "grid_label"),
            constraints=(
                RequireContiguousTimerange(group_by=("instance_id",)),
                # Cell areas are needed for the area-weighted global mean.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )

    facets = ()
    series = (
        SeriesDefinition(
            file_pattern="timeseries/script1/*.nc",
            dimensions={"statistic": "tas annual global mean"},
            values_name="tas",
            index_name="time",
            attributes=[],
        ),
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe."""
        # Clear unwanted elements from the recipe.
        recipe["datasets"].clear()
        recipe["diagnostics"].pop("map")
        variables = recipe["diagnostics"]["timeseries"]["variables"]
        variables.clear()

        # Prepare updated variables section in recipe.
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        # The cell-area supplementary variable is not plotted itself.
        recipe_variables = {k: v for k, v in recipe_variables.items() if k != "areacella"}
        for variable in recipe_variables.values():
            variable["preprocessor"] = "annual_mean_global"
            variable["caption"] = "Annual global mean {long_name} according to {dataset}."

        # Populate recipe with new variables/datasets.
        variables.update(recipe_variables)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/example.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Rewrite the example recipe to run only the timeseries diagnostic."""
    # Drop the parts of the example recipe that are not wanted here.
    recipe["datasets"].clear()
    recipe["diagnostics"].pop("map")
    timeseries_variables = recipe["diagnostics"]["timeseries"]["variables"]
    timeseries_variables.clear()

    # Build variable entries from the input files, skipping the cell-area
    # supplementary variable, and point them at the annual mean preprocessor.
    new_variables = {}
    for name, entry in dataframe_to_recipe(input_files[SourceDatasetType.CMIP6]).items():
        if name == "areacella":
            continue
        entry["preprocessor"] = "annual_mean_global"
        entry["caption"] = "Annual global mean {long_name} according to {dataset}."
        new_variables[name] = entry

    # Populate the recipe with the new variables/datasets.
    timeseries_variables.update(new_variables)

RegionalHistoricalAnnualCycle #

Bases: ESMValToolDiagnostic

Plot regional historical annual cycle of climate variables.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
class RegionalHistoricalAnnualCycle(ESMValToolDiagnostic):
    """
    Plot regional historical annual cycle of climate variables.
    """

    name = "Regional historical annual cycle of climate variables"
    slug = "regional-historical-annual-cycle"
    base_recipe = "ref/recipe_ref_annual_cycle_region.yml"

    # Monthly atmospheric variables plotted per region.
    variables = (
        "hus",
        "pr",
        "psl",
        "tas",
        "ua",
    )

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    },
                ),
            ),
            # One execution per model ensemble member and grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2009, 12),
                ),
                RequireFacets("variable_id", variables),
                # Cell areas are needed for area-weighted statistics.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
        DataRequirement(
            source_type=SourceDatasetType.obs4MIPs,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": (
                            "psl",
                            "ua",
                        ),
                        "source_id": "ERA-5",
                        "frequency": "mon",
                    },
                ),
            ),
            group_by=("source_id",),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2009, 12),
                ),
                RequireFacets("variable_id", ("psl", "ua")),
            ),
            # TODO: Add obs4MIPs datasets once available and working:
            #
            # obs4MIPs dataset that cannot be ingested (https://github.com/Climate-REF/climate-ref/issues/260):
            # - GPCP-V2.3: pr
            #
            # Not yet available on obs4MIPs:
            # - ERA5: hus
            # - HadCRUT5_ground_5.0.1.0-analysis: tas
        ),
    )

    facets = ()
    # Two series per (variable, region): i == 0 is tagged as the model and
    # i == 1 carries the reference dataset id.
    # NOTE(review): sel is fixed at dim0=0 while i varies, whereas the
    # timeseries subclass uses sel={"dim0": i} -- confirm this is intended.
    series = tuple(
        SeriesDefinition(
            file_pattern=f"anncyc-{region}/allplots/*_{var_name}_*.nc",
            sel={"dim0": 0},  # Select the model and not the observation.
            dimensions=(
                {
                    "region": region,
                    "variable_id": var_name,
                    "statistic": "mean",
                }
                | ({} if i == 0 else {"reference_source_id": REFERENCE_DATASETS[var_name]})
            ),
            values_name=var_name,
            index_name="month_number",
            attributes=[],
        )
        for var_name in variables
        for region in REGIONS
        for i in range(2)
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe."""
        # Update the dataset.
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        dataset = recipe_variables["hus"]["additional_datasets"][0]
        dataset.pop("timerange")
        dataset["benchmark_dataset"] = True
        dataset["plot_label"] = "{dataset}.{ensemble}.{grid}".format(**dataset)
        recipe["datasets"] = [dataset]

        # Generate diagnostics for each region.
        diagnostics = {}
        for region in REGIONS:
            for diagnostic_name, orig_diagnostic in recipe["diagnostics"].items():
                # Create the diagnostic for the region.
                diagnostic = copy.deepcopy(orig_diagnostic)
                normalized_region = normalize_region(region)
                diagnostics[f"{diagnostic_name}-{normalized_region}"] = diagnostic

                for variable in diagnostic["variables"].values():
                    # Remove unwanted facets that are part of the dataset.
                    for facet in ("project", "exp", "ensemble", "grid"):
                        variable.pop(facet, None)
                    # Update the preprocessor so it extracts the region.
                    preprocessor_name = variable["preprocessor"]
                    preprocessor = copy.deepcopy(recipe["preprocessors"][preprocessor_name])
                    preprocessor["extract_shape"]["ids"] = {"Name": [region]}
                    variable["preprocessor"] = f"{preprocessor_name}-{normalized_region}"
                    recipe["preprocessors"][variable["preprocessor"]] = preprocessor

                # Update plot titles with region name.
                for script in diagnostic["scripts"].values():
                    for plot in script["plots"].values():
                        plot["pyplot_kwargs"] = {"title": f"{{long_name}} {region}"}
        recipe["diagnostics"] = diagnostics

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Replaces the recipe's datasets with the selected CMIP6 dataset and
    expands each diagnostic and its preprocessor once per region.
    """
    # Update the dataset.
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    # All requested variables share source_id/member_id/grid_label, so the
    # "hus" entry stands in for the whole group -- TODO confirm.
    dataset = recipe_variables["hus"]["additional_datasets"][0]
    dataset.pop("timerange")
    dataset["benchmark_dataset"] = True
    dataset["plot_label"] = "{dataset}.{ensemble}.{grid}".format(**dataset)
    recipe["datasets"] = [dataset]

    # Generate diagnostics for each region.
    diagnostics = {}
    for region in REGIONS:
        for diagnostic_name, orig_diagnostic in recipe["diagnostics"].items():
            # Create the diagnostic for the region.
            diagnostic = copy.deepcopy(orig_diagnostic)
            normalized_region = normalize_region(region)
            diagnostics[f"{diagnostic_name}-{normalized_region}"] = diagnostic

            for variable in diagnostic["variables"].values():
                # Remove unwanted facets that are part of the dataset.
                for facet in ("project", "exp", "ensemble", "grid"):
                    variable.pop(facet, None)
                # Update the preprocessor so it extracts the region.
                preprocessor_name = variable["preprocessor"]
                preprocessor = copy.deepcopy(recipe["preprocessors"][preprocessor_name])
                preprocessor["extract_shape"]["ids"] = {"Name": [region]}
                variable["preprocessor"] = f"{preprocessor_name}-{normalized_region}"
                recipe["preprocessors"][variable["preprocessor"]] = preprocessor

            # Update plot titles with region name.
            for script in diagnostic["scripts"].values():
                for plot in script["plots"].values():
                    plot["pyplot_kwargs"] = {"title": f"{{long_name}} {region}"}
    recipe["diagnostics"] = diagnostics

RegionalHistoricalTimeSeries #

Bases: RegionalHistoricalAnnualCycle

Plot regional historical mean and anomaly of climate variables.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
class RegionalHistoricalTimeSeries(RegionalHistoricalAnnualCycle):
    """
    Plot regional historical mean and anomaly of climate variables.
    """

    name = "Regional historical mean and anomaly of climate variables"
    slug = "regional-historical-timeseries"
    base_recipe = "ref/recipe_ref_timeseries_region.yml"

    # Monthly atmospheric variables plotted per region.
    variables = (
        "hus",
        "pr",
        "psl",
        "tas",
        "ua",
    )

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    },
                ),
            ),
            # One execution per model ensemble member and grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2014, 12),
                ),
                RequireFacets("variable_id", variables),
                # Cell areas are needed for area-weighted statistics.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
        DataRequirement(
            source_type=SourceDatasetType.obs4MIPs,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": (
                            "psl",
                            "ua",
                        ),
                        "source_id": "ERA-5",
                        "frequency": "mon",
                    },
                ),
            ),
            group_by=("source_id",),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2014, 12),
                ),
            ),
            # TODO: Add obs4MIPs datasets once available and working:
            #
            # obs4MIPs dataset that cannot be ingested (https://github.com/Climate-REF/climate-ref/issues/260):
            # - GPCP-V2.3: pr
            #
            # Not yet available on obs4MIPs:
            # - ERA5: hus
            # - HadCRUT5_ground_5.0.1.0-analysis: tas
        ),
    )

    # Two series per (variable, region, diagnostic): "timeseries_abs" holds
    # the absolute mean, "timeseries" the mean anomaly; i == 0 selects the
    # model, i == 1 the reference dataset.
    series = tuple(
        SeriesDefinition(
            file_pattern=f"{diagnostic}-{region}/allplots/*_{var_name}_*.nc",
            sel={"dim0": i},
            dimensions=(
                {
                    "region": region,
                    "variable_id": var_name,
                    "statistic": ("mean" if diagnostic == "timeseries_abs" else "mean anomaly"),
                }
                | ({} if i == 0 else {"reference_source_id": REFERENCE_DATASETS[var_name]})
            ),
            values_name=var_name,
            index_name="time",
            attributes=[],
        )
        for var_name in variables
        for region in REGIONS
        for diagnostic in ["timeseries_abs", "timeseries"]
        for i in range(2)
    )

RegionalHistoricalTrend #

Bases: ESMValToolDiagnostic

Plot regional historical trend of climate variables.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
class RegionalHistoricalTrend(ESMValToolDiagnostic):
    """
    Plot regional historical trend of climate variables.
    """

    name = "Regional historical trend of climate variables"
    slug = "regional-historical-trend"
    base_recipe = "ref/recipe_ref_trend_regions.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": (
                            "hus",
                            "pr",
                            "psl",
                            "tas",
                            "ua",
                        ),
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    },
                ),
            ),
            # One execution per model ensemble member and grid.
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2009, 12),
                ),
                # Cell areas are needed for area-weighted statistics.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
        DataRequirement(
            source_type=SourceDatasetType.obs4MIPs,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": (
                            "psl",
                            "tas",
                            "ua",
                        ),
                        "source_id": "ERA-5",
                        "frequency": "mon",
                    },
                ),
            ),
            group_by=("source_id",),
            constraints=(
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1980, 1),
                    end=PartialDateTime(2009, 12),
                ),
            ),
            # TODO: Add obs4MIPs datasets once available and working:
            #
            # obs4MIPs dataset that cannot be ingested (https://github.com/Climate-REF/climate-ref/issues/260):
            # - GPCP-V2.3: pr
            #
            # Not yet available on obs4MIPs:
            # - ERA5: hus
            # - HadCRUT5_ground_5.0.1.0-analysis: tas
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "variable_id", "region", "metric")

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Attaches the selected CMIP6 dataset to each matching variable and
        keeps only diagnostics for which input data is available.
        """
        recipe["datasets"] = []
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        diagnostics = {}
        for diagnostic_name, diagnostic in recipe["diagnostics"].items():
            for variable_name, variable in diagnostic["variables"].items():
                if variable_name not in recipe_variables:
                    continue
                dataset = recipe_variables[variable_name]["additional_datasets"][0]
                dataset.pop("timerange")
                variable["additional_datasets"].append(dataset)
                diagnostics[diagnostic_name] = diagnostic
        recipe["diagnostics"] = diagnostics

    @classmethod
    def format_result(
        cls,
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Reads the per-region trends computed by each "*_trends" diagnostic
        and stores them in the CMEC metric bundle keyed by variable and
        region.
        """
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": ["variable_id", "region", "metric"],
            "variable_id": {},
            "region": {},
            "metric": {"trend": {}},
        }
        for file in result_dir.glob("work/*_trends/plot/seaborn_barplot.nc"):
            ds = xarray.open_dataset(file)
            source_id = execution_dataset[SourceDatasetType.CMIP6].source_id.iloc[0]
            select = source_id == np.array([s.strip() for s in ds.dataset.values.astype(str).tolist()])
            # Keep only the rows belonging to the dataset under evaluation.
            # `isel` returns a new dataset, so the result must be assigned;
            # previously the selection was discarded and rows from every
            # dataset in the file were iterated below.
            ds = ds.isel(dim0=select)
            variable_id = next(iter(ds.data_vars.keys()))
            metric_args[MetricCV.DIMENSIONS.value]["variable_id"][variable_id] = {}
            metric_args[MetricCV.RESULTS.value][variable_id] = {}
            for region_value, trend_value in zip(
                ds.shape_id.astype(str).values, fillvalues_to_nan(ds[variable_id].values)
            ):
                region = region_value.strip()
                trend = float(trend_value)
                if region not in metric_args[MetricCV.DIMENSIONS.value]["region"]:
                    metric_args[MetricCV.DIMENSIONS.value]["region"][region] = {}
                metric_args[MetricCV.RESULTS.value][variable_id][region] = {"trend": trend}

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) classmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
@classmethod
def format_result(
    cls,
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Reads the per-region trends computed by each "*_trends" diagnostic and
    stores them in the CMEC metric bundle keyed by variable and region.
    """
    metric_args[MetricCV.DIMENSIONS.value] = {
        "json_structure": ["variable_id", "region", "metric"],
        "variable_id": {},
        "region": {},
        "metric": {"trend": {}},
    }
    for file in result_dir.glob("work/*_trends/plot/seaborn_barplot.nc"):
        ds = xarray.open_dataset(file)
        source_id = execution_dataset[SourceDatasetType.CMIP6].source_id.iloc[0]
        select = source_id == np.array([s.strip() for s in ds.dataset.values.astype(str).tolist()])
        # Keep only the rows belonging to the dataset under evaluation.
        # `isel` returns a new dataset, so the result must be assigned;
        # previously the selection was discarded and rows from every
        # dataset in the file were iterated below.
        ds = ds.isel(dim0=select)
        variable_id = next(iter(ds.data_vars.keys()))
        metric_args[MetricCV.DIMENSIONS.value]["variable_id"][variable_id] = {}
        metric_args[MetricCV.RESULTS.value][variable_id] = {}
        for region_value, trend_value in zip(
            ds.shape_id.astype(str).values, fillvalues_to_nan(ds[variable_id].values)
        ):
            region = region_value.strip()
            trend = float(trend_value)
            if region not in metric_args[MetricCV.DIMENSIONS.value]["region"]:
                metric_args[MetricCV.DIMENSIONS.value]["region"][region] = {}
            metric_args[MetricCV.RESULTS.value][variable_id][region] = {"trend": trend}

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/regional_historical_changes.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Attach the selected CMIP6 datasets and drop unusable diagnostics."""
    recipe["datasets"] = []
    available = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    kept_diagnostics = {}
    for name, diagnostic in recipe["diagnostics"].items():
        for variable_name, variable in diagnostic["variables"].items():
            if variable_name in available:
                entry = available[variable_name]["additional_datasets"][0]
                entry.pop("timerange")
                variable["additional_datasets"].append(entry)
                # Keep only diagnostics for which input data is available.
                kept_diagnostics[name] = diagnostic
    recipe["diagnostics"] = kept_diagnostics

SeaIceAreaBasic #

Bases: ESMValToolDiagnostic

Calculate seasonal cycle and time series of NH and SH sea ice area.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/sea_ice_area_basic.py
class SeaIceAreaBasic(ESMValToolDiagnostic):
    """
    Calculate seasonal cycle and time series of NH and SH sea ice area.
    """

    name = "Sea ice area basic"
    slug = "sea-ice-area-basic"
    base_recipe = "ref/recipe_ref_sea_ice_area_basic.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": "siconc",
                        "experiment_id": "historical",
                        "table_id": "SImon",
                    },
                ),
            ),
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # The full satellite-era period must be present.
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1979, 1),
                    end=PartialDateTime(2014, 12),
                ),
                # Sea-ice area needs the ocean grid-cell areas.
                AddSupplementaryDataset.from_defaults("areacello", SourceDatasetType.CMIP6),
                RequireFacets("variable_id", ("siconc", "areacello")),
            ),
        ),
        # TODO: Use OSI-450-nh and OSI-450-sh from obs4MIPs once available.
    )
    facets = ()
    # Two series per region and plot: index 0 is the model and index 1 the
    # OSI-450 reference product (which carries a reference_source_id).
    series = tuple(
        SeriesDefinition(
            file_pattern=f"siarea_min/allplots/timeseries_sea_ice_area_{region}_*.nc",
            sel={"dim0": i},
            dimensions=(
                {
                    "region": REGIONS[region],
                    "statistic": f"{MONTHS[region]} sea ice area",
                }
                | ({} if i == 0 else {"reference_source_id": f"OSI-450-{region}"})
            ),
            values_name="siconc",
            index_name="time",
            attributes=[],
        )
        for region in REGIONS
        for i in range(2)
    ) + tuple(
        SeriesDefinition(
            file_pattern=f"siarea_seas/allplots/annual_cycle_sea_ice_area_{region}_*.nc",
            sel={"dim0": i},
            dimensions=(
                {
                    "region": REGIONS[region],
                    "statistic": "20-year average seasonal cycle of the sea ice area",
                }
                | ({} if i == 0 else {"reference_source_id": f"OSI-450-{region}"})
            ),
            values_name="siconc",
            index_name="month_number",
            attributes=[],
        )
        for region in REGIONS
        for i in range(2)
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Fills the datasets section from the available input files, attaches
        the OSI-450 observational entries to each variable, and rewrites the
        plot captions for the selected model dataset.
        """
        # Update datasets
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        recipe["datasets"] = recipe_variables["siconc"]["additional_datasets"]

        # Use the timerange from the recipe, as defined in the variable.
        for dataset in recipe["datasets"]:
            dataset.pop("timerange")

        def osi450_dataset(name: str) -> dict[str, object]:
            """Build an observational dataset entry for one hemisphere."""
            return {
                "dataset": name,
                "mip": "OImon",
                "project": "OBS",
                "supplementary_variables": [
                    {
                        "short_name": "areacello",
                        "mip": "fx",
                    },
                ],
                "tier": 2,
                "type": "reanaly",
                "version": "v3",
            }

        # Build each entry independently: a shallow ``dict.copy`` would make
        # both hemispheres share the same nested ``supplementary_variables``
        # list object, so a later mutation of one would silently affect the
        # other.
        nh_obs = osi450_dataset("OSI-450-nh")
        sh_obs = osi450_dataset("OSI-450-sh")
        diagnostics = recipe["diagnostics"]
        diagnostics["siarea_min"]["variables"]["sea_ice_area_nh_sep"]["additional_datasets"] = [nh_obs]
        diagnostics["siarea_min"]["variables"]["sea_ice_area_sh_feb"]["additional_datasets"] = [sh_obs]
        diagnostics["siarea_seas"]["variables"]["sea_ice_area_nh"]["additional_datasets"] = [nh_obs]
        diagnostics["siarea_seas"]["variables"]["sea_ice_area_sh"]["additional_datasets"] = [sh_obs]

        # Update the captions.
        dataset = "{dataset}.{ensemble}.{grid}".format(**recipe["datasets"][0])
        for diagnostic in diagnostics.values():
            for script_settings in diagnostic["scripts"].values():
                for plot_settings in script_settings["plots"].values():
                    plot_settings["caption"] = plot_settings["caption"].replace("[dataset]", dataset)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/sea_ice_area_basic.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Fills the datasets section from the available input files, attaches
    the OSI-450 observational entries to each variable, and rewrites the
    plot captions for the selected model dataset.
    """
    # Update datasets
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    recipe["datasets"] = recipe_variables["siconc"]["additional_datasets"]

    # Use the timerange from the recipe, as defined in the variable.
    for dataset in recipe["datasets"]:
        dataset.pop("timerange")

    def osi450_dataset(name: str) -> dict[str, object]:
        """Build an observational dataset entry for one hemisphere."""
        return {
            "dataset": name,
            "mip": "OImon",
            "project": "OBS",
            "supplementary_variables": [
                {
                    "short_name": "areacello",
                    "mip": "fx",
                },
            ],
            "tier": 2,
            "type": "reanaly",
            "version": "v3",
        }

    # Build each entry independently: a shallow ``dict.copy`` would make both
    # hemispheres share the same nested ``supplementary_variables`` list
    # object, so a later mutation of one would silently affect the other.
    nh_obs = osi450_dataset("OSI-450-nh")
    sh_obs = osi450_dataset("OSI-450-sh")
    diagnostics = recipe["diagnostics"]
    diagnostics["siarea_min"]["variables"]["sea_ice_area_nh_sep"]["additional_datasets"] = [nh_obs]
    diagnostics["siarea_min"]["variables"]["sea_ice_area_sh_feb"]["additional_datasets"] = [sh_obs]
    diagnostics["siarea_seas"]["variables"]["sea_ice_area_nh"]["additional_datasets"] = [nh_obs]
    diagnostics["siarea_seas"]["variables"]["sea_ice_area_sh"]["additional_datasets"] = [sh_obs]

    # Update the captions.
    dataset = "{dataset}.{ensemble}.{grid}".format(**recipe["datasets"][0])
    for diagnostic in diagnostics.values():
        for script_settings in diagnostic["scripts"].values():
            for plot_settings in script_settings["plots"].values():
                plot_settings["caption"] = plot_settings["caption"].replace("[dataset]", dataset)

SeaIceSensitivity #

Bases: ESMValToolDiagnostic

Calculate sea ice sensitivity.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/sea_ice_sensitivity.py
class SeaIceSensitivity(ESMValToolDiagnostic):
    """
    Calculate sea ice sensitivity.

    Runs the ESMValTool sea-ice sensitivity recipe on CMIP6 historical
    ``siconc`` and ``tas`` data for 1979-2014 and reports per-model metrics
    for the Arctic and Antarctic.
    """

    name = "Sea ice sensitivity"
    slug = "sea-ice-sensitivity"
    base_recipe = "recipe_seaice_sensitivity.yml"

    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                # Monthly sea-ice concentration from the historical runs.
                FacetFilter(
                    facets={
                        "variable_id": "siconc",
                        "experiment_id": "historical",
                        "table_id": "SImon",
                    },
                ),
                # Monthly near-surface air temperature from the same runs.
                FacetFilter(
                    facets={
                        "variable_id": "tas",
                        "experiment_id": "historical",
                        "table_id": "Amon",
                    },
                ),
            ),
            group_by=("experiment_id",),  # this does nothing, but group_by cannot be empty
            constraints=(
                # Each dataset must cover the full 1979-2014 analysis period.
                RequireTimerange(
                    group_by=("instance_id",),
                    start=PartialDateTime(1979, 1),
                    end=PartialDateTime(2014, 12),
                ),
                # Both variables must be present for each ensemble member.
                RequireFacets(
                    "variable_id",
                    required_facets=("siconc", "tas"),
                    group_by=("source_id", "member_id", "grid_label"),
                ),
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
                AddSupplementaryDataset.from_defaults("areacello", SourceDatasetType.CMIP6),
                # Ocean cell areas are required for every model/grid pair.
                RequireFacets(
                    "variable_id",
                    required_facets=("areacello",),
                    group_by=("source_id", "grid_label"),
                ),
            ),
        ),
    )
    facets = ("experiment_id", "source_id", "region", "metric")

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Fills the datasets section with the available ``tas`` datasets,
        pinned to the 1979-2014 period.
        """
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        datasets = recipe_variables["tas"]["additional_datasets"]
        for dataset in datasets:
            # The mip is defined per variable in the recipe, so drop it here.
            dataset.pop("mip")
            dataset["timerange"] = "1979/2014"
        recipe["datasets"] = datasets

    @staticmethod
    def format_result(
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Reads the per-hemisphere ``plotted_values.csv`` tables written by
        the diagnostic and stores their values in the CMEC metric bundle,
        nested as source_id -> region -> metric.
        """
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": [
                "source_id",
                "region",
                "metric",
            ],
            "source_id": {},
            "region": {},
            "metric": {},
        }
        for region in "antarctic", "arctic":
            df = pd.read_csv(
                result_dir / "work" / region / "sea_ice_sensitivity_script" / "plotted_values.csv"
            )
            # The first (unnamed) CSV column holds the model name; the
            # "label" column is plotting-only, so drop it.
            df = df.rename(columns={"Unnamed: 0": "source_id"}).drop(columns=["label"])
            metric_args[MetricCV.DIMENSIONS.value]["region"][region] = {}
            for metric in df.columns[1:]:
                metric_args[MetricCV.DIMENSIONS.value]["metric"][metric] = {}
            for row in df.itertuples(index=False):
                source_id = row.source_id
                metric_args[MetricCV.DIMENSIONS.value]["source_id"][source_id] = {}
                # row[0] is source_id, so row[1:] lines up with columns[1:].
                for metric, value in zip(df.columns[1:], row[1:]):
                    if source_id not in metric_args[MetricCV.RESULTS.value]:
                        metric_args[MetricCV.RESULTS.value][source_id] = {}
                    if region not in metric_args[MetricCV.RESULTS.value][source_id]:
                        metric_args[MetricCV.RESULTS.value][source_id][region] = {}
                    metric_args[MetricCV.RESULTS.value][source_id][region][metric] = value

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) staticmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/sea_ice_sensitivity.py
@staticmethod
def format_result(
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Reads the per-hemisphere ``plotted_values.csv`` tables written by the
    diagnostic and folds their values into the CMEC metric bundle, nested
    as source_id -> region -> metric.
    """
    dimensions = {
        "json_structure": [
            "source_id",
            "region",
            "metric",
        ],
        "source_id": {},
        "region": {},
        "metric": {},
    }
    metric_args[MetricCV.DIMENSIONS.value] = dimensions
    results = metric_args[MetricCV.RESULTS.value]
    for region in ("antarctic", "arctic"):
        csv_path = result_dir / "work" / region / "sea_ice_sensitivity_script" / "plotted_values.csv"
        # The first (unnamed) column holds the model name; "label" is
        # plotting-only and not a metric.
        table = pd.read_csv(csv_path)
        table = table.rename(columns={"Unnamed: 0": "source_id"}).drop(columns=["label"])
        dimensions["region"][region] = {}
        metric_names = list(table.columns[1:])
        for name in metric_names:
            dimensions["metric"][name] = {}
        for row in table.itertuples(index=False):
            source_id = row.source_id
            dimensions["source_id"][source_id] = {}
            # row[0] is source_id, so row[1:] lines up with metric_names.
            for name, value in zip(metric_names, row[1:]):
                results.setdefault(source_id, {}).setdefault(region, {})[name] = value

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/sea_ice_sensitivity.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Populates the recipe's ``datasets`` section with the available ``tas``
    datasets, pinned to the 1979-2014 observational period.
    """
    cmip6 = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
    tas_datasets = cmip6["tas"]["additional_datasets"]
    for entry in tas_datasets:
        # The mip is defined per variable in the recipe, so drop it here.
        entry.pop("mip")
        entry["timerange"] = "1979/2014"
    recipe["datasets"] = tas_datasets

TransientClimateResponse #

Bases: ESMValToolDiagnostic

Calculate the global mean transient climate response for a dataset.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcr.py
class TransientClimateResponse(ESMValToolDiagnostic):
    """
    Calculate the global mean transient climate response for a dataset.
    """

    name = "Transient Climate Response"
    slug = "transient-climate-response"
    base_recipe = "recipe_tcr.yml"

    # TCR is computed from these two experiments for the same model/member.
    experiments = (
        "1pctCO2",
        "piControl",
    )
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": ("tas",),
                        "experiment_id": experiments,
                        "table_id": "Amon",
                    },
                ),
            ),
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # Both experiments must have gap-free, overlapping data.
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                RequireFacets("experiment_id", experiments),
                # Global means need the atmosphere grid-cell areas.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "region", "metric")
    series = (
        SeriesDefinition(
            file_pattern="tcr/calculate/{source_id}*.nc",
            dimensions={
                "statistic": "global annual mean tas anomaly relative to linear fit of piControl run",
            },
            values_name="tas_anomaly",
            index_name="time",
            attributes=[],
        ),
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Restricts the recipe to the single-model TCR computation, fills the
        datasets section, and removes YAML-anchor-only keys.
        """
        # Only run the diagnostic that computes TCR for a single model.
        recipe["diagnostics"] = {
            "tcr": {
                "description": "Calculate TCR.",
                "variables": {
                    "tas": {
                        "preprocessor": "spatial_mean",
                    },
                },
                "scripts": {
                    "calculate": {
                        "script": "climate_metrics/tcr.py",
                        "calculate_mmm": False,
                    },
                },
            },
        }

        # Prepare updated datasets section in recipe. It contains two
        # datasets, one for the "1pctCO2" and one for the "piControl"
        # experiment.
        recipe_variables = dataframe_to_recipe(
            input_files[SourceDatasetType.CMIP6],
            equalize_timerange=True,
        )
        recipe["datasets"] = recipe_variables["tas"]["additional_datasets"]

        # Remove keys from the recipe that are only used for YAML anchors
        keys_to_remove = [
            "TCR",
            "SCATTERPLOT",
            "VAR_SETTING",
        ]
        for key in keys_to_remove:
            recipe.pop(key, None)

    @staticmethod
    def format_result(
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Loads the TCR value computed by the diagnostic and stores it as a
        single global metric in the CMEC bundle.
        """
        tcr_ds = xarray.open_dataset(result_dir / "work" / "tcr" / "calculate" / "tcr.nc")
        # Take the first element of the stored array as the TCR value.
        tcr = float(fillvalues_to_nan(tcr_ds["tcr"].values)[0])

        # Update the diagnostic bundle arguments with the computed diagnostics.
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": [
                "region",
                "metric",
            ],
            "region": {"global": {}},
            "metric": {"tcr": {}},
        }
        metric_args[MetricCV.RESULTS.value] = {
            "global": {
                "tcr": tcr,
            },
        }

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) staticmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcr.py
@staticmethod
def format_result(
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Loads the TCR value computed by the ESMValTool diagnostic and stores
    it as a single global metric in the CMEC bundle.
    """
    # Open via a context manager so the netCDF file handle is closed
    # deterministically instead of leaking until garbage collection.
    with xarray.open_dataset(result_dir / "work" / "tcr" / "calculate" / "tcr.nc") as tcr_ds:
        # Take the first element of the stored array as the TCR value.
        tcr = float(fillvalues_to_nan(tcr_ds["tcr"].values)[0])

    # Update the diagnostic bundle arguments with the computed diagnostics.
    metric_args[MetricCV.DIMENSIONS.value] = {
        "json_structure": [
            "region",
            "metric",
        ],
        "region": {"global": {}},
        "metric": {"tcr": {}},
    }
    metric_args[MetricCV.RESULTS.value] = {
        "global": {
            "tcr": tcr,
        },
    }

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcr.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Replaces the diagnostics section with the single-model TCR computation,
    fills the datasets section from the available input files, and strips
    keys that only exist as YAML anchors in the base recipe.
    """
    # Restrict the recipe to the single diagnostic that computes TCR.
    recipe["diagnostics"] = {
        "tcr": {
            "description": "Calculate TCR.",
            "variables": {
                "tas": {
                    "preprocessor": "spatial_mean",
                },
            },
            "scripts": {
                "calculate": {
                    "script": "climate_metrics/tcr.py",
                    "calculate_mmm": False,
                },
            },
        },
    }

    # The datasets section holds one entry per experiment ("1pctCO2" and
    # "piControl"), with their timeranges equalized.
    cmip6 = dataframe_to_recipe(
        input_files[SourceDatasetType.CMIP6],
        equalize_timerange=True,
    )
    recipe["datasets"] = cmip6["tas"]["additional_datasets"]

    # Drop keys that the base recipe only uses as YAML anchors.
    for anchor in ("TCR", "SCATTERPLOT", "VAR_SETTING"):
        recipe.pop(anchor, None)

TransientClimateResponseEmissions #

Bases: ESMValToolDiagnostic

Calculate the global mean Transient Climate Response to Cumulative CO2 Emissions.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcre.py
class TransientClimateResponseEmissions(ESMValToolDiagnostic):
    """
    Calculate the global mean Transient Climate Response to Cumulative CO2 Emissions.
    """

    name = "Transient Climate Response to Cumulative CO2 Emissions"
    slug = "transient-climate-response-emissions"
    base_recipe = "recipe_tcre.yml"

    # Temperature and anthropogenic CO2 flux are both needed for TCRE.
    variables = (
        "tas",
        "fco2antt",
    )
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                # Both variables from the emission-driven 1%/yr CO2 run.
                FacetFilter(
                    facets={
                        "variable_id": variables,
                        "experiment_id": "esm-1pctCO2",
                        "table_id": "Amon",
                    },
                ),
                # Only temperature is needed from the control run.
                FacetFilter(
                    facets={
                        "variable_id": "tas",
                        "experiment_id": "esm-piControl",
                        "table_id": "Amon",
                    },
                ),
            ),
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # Both experiments must have gap-free, overlapping data.
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                RequireFacets("experiment_id", ("esm-1pctCO2", "esm-piControl")),
                RequireFacets("variable_id", variables),
                # Global means need the atmosphere grid-cell areas.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "region", "metric")
    # TODO: the ESMValTool diagnostic script does not save the data for the timeseries.
    series = tuple()

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Selects one dataset per variable/experiment combination, wires them
        into the ``tcre`` diagnostic, and rewrites captions and titles for
        the selected model and ensemble member.
        """
        # Prepare updated datasets section in recipe. It contains three
        # datasets, "tas" and "fco2antt" for the "esm-1pctCO2" and just "tas"
        # for the "esm-piControl" experiment.
        recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])
        # NOTE: ``next`` without a default raises StopIteration if a
        # variable/experiment combination is missing from the inputs.
        tas_esm_1pctCO2 = next(
            ds for ds in recipe_variables["tas"]["additional_datasets"] if ds["exp"] == "esm-1pctCO2"
        )
        fco2antt_esm_1pctCO2 = next(
            ds for ds in recipe_variables["fco2antt"]["additional_datasets"] if ds["exp"] == "esm-1pctCO2"
        )
        tas_esm_piControl = next(
            ds for ds in recipe_variables["tas"]["additional_datasets"] if ds["exp"] == "esm-piControl"
        )
        # Evaluate the control run over the same period as the forced run.
        tas_esm_piControl["timerange"] = tas_esm_1pctCO2["timerange"]

        recipe["diagnostics"]["tcre"]["variables"] = {
            "tas_esm-1pctCO2": {
                "short_name": "tas",
                "preprocessor": "global_annual_mean_anomaly",
                "additional_datasets": [tas_esm_1pctCO2],
            },
            "tas_esm-piControl": {
                "short_name": "tas",
                "preprocessor": "global_annual_mean_anomaly",
                "additional_datasets": [tas_esm_piControl],
            },
            "fco2antt": {
                "preprocessor": "global_cumulative_sum",
                "additional_datasets": [fco2antt_esm_1pctCO2],
            },
        }
        # Only the single-model TCRE calculation is needed here.
        recipe["diagnostics"].pop("barplot")

        # Update descriptions.
        # The base recipe hard-codes MPI-ESM1-2-LR / r1i1p1f1 in its text.
        dataset = tas_esm_1pctCO2["dataset"]
        ensemble = tas_esm_1pctCO2["ensemble"]
        settings = recipe["diagnostics"]["tcre"]["scripts"]["calculate_tcre"]
        settings["caption"] = (
            settings["caption"].replace("MPI-ESM1-2-LR", dataset).replace("r1i1p1f1", ensemble)
        )
        settings["pyplot_kwargs"]["title"] = (
            settings["pyplot_kwargs"]["title"].replace("MPI-ESM1-2-LR", dataset).replace("r1i1p1f1", ensemble)
        )

    @staticmethod
    def format_result(
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Loads the TCRE value computed by the diagnostic and stores it as a
        single global metric in the CMEC bundle.
        """
        tcre_ds = xarray.open_dataset(result_dir / "work" / "tcre" / "calculate_tcre" / "tcre.nc")
        # Take the first element of the stored array as the TCRE value.
        tcre = float(fillvalues_to_nan(tcre_ds["tcre"].values)[0])

        # Update the diagnostic bundle arguments with the computed diagnostics.
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": [
                "region",
                "metric",
            ],
            "region": {"global": {}},
            "metric": {"tcre": {}},
        }
        metric_args[MetricCV.RESULTS.value] = {
            "global": {
                "tcre": tcre,
            },
        }
        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) staticmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcre.py
@staticmethod
def format_result(
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Loads the TCRE value computed by the ESMValTool diagnostic and stores
    it as a single global metric in the CMEC bundle.
    """
    # Open via a context manager so the netCDF file handle is closed
    # deterministically instead of leaking until garbage collection.
    with xarray.open_dataset(result_dir / "work" / "tcre" / "calculate_tcre" / "tcre.nc") as tcre_ds:
        # Take the first element of the stored array as the TCRE value.
        tcre = float(fillvalues_to_nan(tcre_ds["tcre"].values)[0])

    # Update the diagnostic bundle arguments with the computed diagnostics.
    metric_args[MetricCV.DIMENSIONS.value] = {
        "json_structure": [
            "region",
            "metric",
        ],
        "region": {"global": {}},
        "metric": {"tcre": {}},
    }
    metric_args[MetricCV.RESULTS.value] = {
        "global": {
            "tcre": tcre,
        },
    }
    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/tcre.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Selects one dataset per variable/experiment combination ("tas" and
    "fco2antt" for "esm-1pctCO2", plus "tas" for "esm-piControl"), wires
    them into the ``tcre`` diagnostic, and rewrites captions and titles
    for the selected model and ensemble member.
    """
    recipe_variables = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])

    def pick(short_name: str, experiment: str) -> dict:
        """Return the first dataset of *short_name* from *experiment*."""
        return next(
            ds
            for ds in recipe_variables[short_name]["additional_datasets"]
            if ds["exp"] == experiment
        )

    tas_esm_1pctCO2 = pick("tas", "esm-1pctCO2")
    fco2antt_esm_1pctCO2 = pick("fco2antt", "esm-1pctCO2")
    tas_esm_piControl = pick("tas", "esm-piControl")
    # Evaluate the control run over the same period as the forced run.
    tas_esm_piControl["timerange"] = tas_esm_1pctCO2["timerange"]

    recipe["diagnostics"]["tcre"]["variables"] = {
        "tas_esm-1pctCO2": {
            "short_name": "tas",
            "preprocessor": "global_annual_mean_anomaly",
            "additional_datasets": [tas_esm_1pctCO2],
        },
        "tas_esm-piControl": {
            "short_name": "tas",
            "preprocessor": "global_annual_mean_anomaly",
            "additional_datasets": [tas_esm_piControl],
        },
        "fco2antt": {
            "preprocessor": "global_cumulative_sum",
            "additional_datasets": [fco2antt_esm_1pctCO2],
        },
    }
    recipe["diagnostics"].pop("barplot")

    # The base recipe hard-codes MPI-ESM1-2-LR / r1i1p1f1 in its text;
    # point captions and titles at the actual model and ensemble member.
    dataset = tas_esm_1pctCO2["dataset"]
    ensemble = tas_esm_1pctCO2["ensemble"]

    def rebrand(text: str) -> str:
        """Substitute the selected model and ensemble into *text*."""
        return text.replace("MPI-ESM1-2-LR", dataset).replace("r1i1p1f1", ensemble)

    settings = recipe["diagnostics"]["tcre"]["scripts"]["calculate_tcre"]
    settings["caption"] = rebrand(settings["caption"])
    settings["pyplot_kwargs"]["title"] = rebrand(settings["pyplot_kwargs"]["title"])

ZeroEmissionCommitment #

Bases: ESMValToolDiagnostic

Calculate the global mean Zero Emission Commitment (ZEC) temperature.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/zec.py
class ZeroEmissionCommitment(ESMValToolDiagnostic):
    """
    Calculate the global mean Zero Emission Commitment (ZEC) temperature.
    """

    name = "Zero Emission Commitment"
    slug = "zero-emission-commitment"
    base_recipe = "recipe_zec.yml"

    # ZEC compares the zero-emissions branch run against the 1%/yr CO2 run
    # it branched from.
    experiments = (
        "1pctCO2",
        "esm-1pct-brch-1000PgC",
    )
    data_requirements = (
        DataRequirement(
            source_type=SourceDatasetType.CMIP6,
            filters=(
                FacetFilter(
                    facets={
                        "variable_id": ("tas",),
                        "experiment_id": experiments,
                        "table_id": "Amon",
                    },
                ),
            ),
            group_by=("source_id", "member_id", "grid_label"),
            constraints=(
                # Both experiments must have gap-free, overlapping data.
                RequireContiguousTimerange(group_by=("instance_id",)),
                RequireOverlappingTimerange(group_by=("instance_id",)),
                RequireFacets("experiment_id", experiments),
                # Global means need the atmosphere grid-cell areas.
                AddSupplementaryDataset.from_defaults("areacella", SourceDatasetType.CMIP6),
            ),
        ),
    )
    facets = ("grid_label", "member_id", "source_id", "region", "metric")
    series = (
        SeriesDefinition(
            file_pattern="work/zec/zec/zec.nc",
            sel={"dim0": 0},
            dimensions={
                "statistic": "zec",
            },
            values_name="zec",
            index_name="time",
            attributes=[],
        ),
    )

    @staticmethod
    def update_recipe(
        recipe: Recipe,
        input_files: dict[SourceDatasetType, pandas.DataFrame],
    ) -> None:
        """Update the recipe.

        Adds the branch-run ``tas`` and the matching 20-year base-period
        ``tas`` from the 1pctCO2 run to the ``zec`` diagnostic.
        """
        # Prepare updated datasets section in recipe. It contains two
        # datasets, one for the "esm-1pct-brch-1000PgC" and one for the
        # "1pctCO2" experiment.
        datasets = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])["tas"]["additional_datasets"]
        base_dataset = next(ds for ds in datasets if ds["exp"] == "1pctCO2")
        dataset = next(ds for ds in datasets if ds["exp"] == "esm-1pct-brch-1000PgC")
        # The anomaly base period is the 20 years of the 1pctCO2 run centred
        # on the start of the zero-emissions branch run.
        start = dataset["timerange"].split("/")[0]
        base_start = f"{int(start[:4]) - 10:04d}{start[4:]}"
        base_end = f"{int(start[:4]) + 10:04d}{start[4:]}"
        base_dataset["timerange"] = f"{base_start}/{base_end}"
        variables = recipe["diagnostics"]["zec"]["variables"]
        variables["tas_base"] = {
            "short_name": "tas",
            "preprocessor": "anomaly_base",
            "additional_datasets": [base_dataset],
        }
        variables["tas"] = {
            "preprocessor": "spatial_mean",
            "additional_datasets": [dataset],
        }

    @classmethod
    def format_result(
        cls,
        result_dir: Path,
        execution_dataset: ExecutionDatasetCollection,
        metric_args: MetricBundleArgs,
        output_args: OutputBundleArgs,
    ) -> tuple[CMECMetric, CMECOutput]:
        """Format the result.

        Loads the ZEC value computed by the diagnostic and stores it as a
        single global metric in the CMEC bundle.
        """
        zec_ds = xarray.open_dataset(result_dir / "work" / "zec" / "zec" / "zec_50.nc")
        # Take the first element of the stored array as the ZEC value.
        zec = float(fillvalues_to_nan(zec_ds["zec"].values)[0])

        # Update the diagnostic bundle arguments with the computed diagnostics.
        metric_args[MetricCV.DIMENSIONS.value] = {
            "json_structure": ["region", "metric"],
            "region": {"global": {}},
            "metric": {"zec": {}},
        }
        metric_args[MetricCV.RESULTS.value] = {
            "global": {
                "zec": zec,
            },
        }

        return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

format_result(result_dir, execution_dataset, metric_args, output_args) classmethod #

Format the result.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/zec.py
@classmethod
def format_result(
    cls,
    result_dir: Path,
    execution_dataset: ExecutionDatasetCollection,
    metric_args: MetricBundleArgs,
    output_args: OutputBundleArgs,
) -> tuple[CMECMetric, CMECOutput]:
    """Format the result.

    Loads the ZEC value computed by the ESMValTool diagnostic and stores
    it as a single global metric in the CMEC bundle.
    """
    # Open via a context manager so the netCDF file handle is closed
    # deterministically instead of leaking until garbage collection.
    with xarray.open_dataset(result_dir / "work" / "zec" / "zec" / "zec_50.nc") as zec_ds:
        # Take the first element of the stored array as the ZEC value.
        zec = float(fillvalues_to_nan(zec_ds["zec"].values)[0])

    # Update the diagnostic bundle arguments with the computed diagnostics.
    metric_args[MetricCV.DIMENSIONS.value] = {
        "json_structure": ["region", "metric"],
        "region": {"global": {}},
        "metric": {"zec": {}},
    }
    metric_args[MetricCV.RESULTS.value] = {
        "global": {
            "zec": zec,
        },
    }

    return CMECMetric.model_validate(metric_args), CMECOutput.model_validate(output_args)

update_recipe(recipe, input_files) staticmethod #

Update the recipe.

Source code in packages/climate-ref-esmvaltool/src/climate_ref_esmvaltool/diagnostics/zec.py
@staticmethod
def update_recipe(
    recipe: Recipe,
    input_files: dict[SourceDatasetType, pandas.DataFrame],
) -> None:
    """Update the recipe.

    Replaces the datasets in the recipe's "zec" diagnostic with one
    dataset for the "esm-1pct-brch-1000PgC" branch run and one for its
    "1pctCO2" parent run.
    """
    tas_datasets = dataframe_to_recipe(input_files[SourceDatasetType.CMIP6])["tas"]["additional_datasets"]
    parent = next(ds for ds in tas_datasets if ds["exp"] == "1pctCO2")
    branch = next(ds for ds in tas_datasets if ds["exp"] == "esm-1pct-brch-1000PgC")

    # Use a 20-year window in the parent run centred on the year the
    # branch run starts as the anomaly baseline period.
    branch_start = branch["timerange"].split("/")[0]
    branch_year = int(branch_start[:4])
    suffix = branch_start[4:]
    parent["timerange"] = f"{branch_year - 10:04d}{suffix}/{branch_year + 10:04d}{suffix}"

    variables = recipe["diagnostics"]["zec"]["variables"]
    variables["tas_base"] = {
        "short_name": "tas",
        "preprocessor": "anomaly_base",
        "additional_datasets": [parent],
    }
    variables["tas"] = {
        "preprocessor": "spatial_mean",
        "additional_datasets": [branch],
    }

sub-packages#

Sub-package Description
base
climate_at_global_warming_levels
climate_drivers_for_fire
cloud_radiative_effects
cloud_scatterplots
ecs
enso
example
regional_historical_changes
sea_ice_area_basic
sea_ice_sensitivity
tcr
tcre
zec